diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 30cfe01..7e268fd 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -14,94 +14,88 @@
     // The optional 'workspaceFolder' property is the path VS Code should open by default when
     // connected. This is typically a file mount in .devcontainer/docker-compose.yml
     "workspaceFolder": "/app",
-    // Set *default* container specific settings.json values on container create.
-    "settings": {
-        "terminal.integrated.shell.linux": null,
-        // https://github.com/golang/tools/blob/master/gopls/doc/vscode.md#vscode
-        "go.useLanguageServer": true,
-        "[go]": {
-            "editor.formatOnSave": true,
-            "editor.codeActionsOnSave": {
-                "source.organizeImports": true,
+    // All containers should stop if we close / reload the VSCode window.
+    "shutdownAction": "stopCompose",
+    "customizations": {
+        "vscode": {
+            // Set *default* container specific settings.json values on container create.
+            "settings": {
+                // https://github.com/golang/tools/blob/master/gopls/doc/vscode.md#vscode
+                "go.useLanguageServer": true,
+                "[go]": {
+                    "editor.formatOnSave": true,
+                    "editor.codeActionsOnSave": {
+                        "source.organizeImports": true,
+                    },
+                    // Optional: Disable snippets, as they conflict with completion ranking.
+                    "editor.snippetSuggestions": "none",
+                },
+                "[go.mod]": {
+                    "editor.formatOnSave": true,
+                    "editor.codeActionsOnSave": {
+                        "source.organizeImports": true,
+                    },
+                },
+                "[sql]": {
+                    "editor.formatOnSave": true
+                },
+                "gopls": {
+                    // Add parameter placeholders when completing a function.
+                    "usePlaceholders": true,
+                    // If true, enable additional analyses with staticcheck.
+                    // Warning: This will significantly increase memory usage.
+                    // DISABLED, done via
+                    "staticcheck": false,
+                },
+                // https://code.visualstudio.com/docs/languages/go#_intellisense
+                "go.autocompleteUnimportedPackages": true,
+                // https://github.com/golangci/golangci-lint#editor-integration
+                "go.lintTool": "golangci-lint",
+                "go.lintFlags": [
+                    "--fast",
+                    "--timeout",
+                    "5m"
+                ],
+                // disable test caching, race and show coverage (in sync with makefile)
+                "go.testFlags": [
+                    "-cover",
+                    "-race",
+                    "-count=1",
+                    "-v"
+                ],
+                "go.coverMode": "atomic", // atomic is required when utilizing -race
+                "go.delveConfig": {
+                    "dlvLoadConfig": {
+                        // increase max length of strings displayed in debugger
+                        "maxStringLen": 2048,
+                    },
+                    "apiVersion": 2,
+                },
+                // ensure that the pgFormatter VSCode extension uses the pgFormatter that comes preinstalled in the Dockerfile
+                "pgFormatter.pgFormatterPath": "/usr/local/bin/pg_format"
+            },
-            // Optional: Disable snippets, as they conflict with completion ranking.
-            "editor.snippetSuggestions": "none",
-        },
-        "[go.mod]": {
-            "editor.formatOnSave": true,
-            "editor.codeActionsOnSave": {
-                "source.organizeImports": true,
-            },
-        },
-        "[sql]": {
-            "editor.formatOnSave": true
-        },
-        "gopls": {
-            // Add parameter placeholders when completing a function.
-            "usePlaceholders": true,
-            // If true, enable additional analyses with staticcheck.
-            // Warning: This will significantly increase memory usage.
-            // DISABLED, done via
-            "staticcheck": false,
-        },
-        // https://code.visualstudio.com/docs/languages/go#_intellisense
-        "go.autocompleteUnimportedPackages": true,
-        // https://github.com/golangci/golangci-lint#editor-integration
-        "go.lintTool": "golangci-lint",
-        "go.lintFlags": [
-            "--fast"
-        ],
-        // disable test caching, race and show coverage (in sync with makefile)
-        "go.testFlags": [
-            "-cover",
-            "-race",
-            "-count=1",
-            "-v"
-        ],
-        // "go.lintOnSave": "workspace"
-        // general build settings in sync with our makefile
-        // "go.buildFlags": [
-        //     "-o",
-        //     "bin/app"
-        // ]
-        // "sqltools.connections": [
-        //     {
-        //         "database": "sample",
-        //         "dialect": "PostgreSQL",
-        //         "name": "postgres",
-        //         "password": "9bed16f749d74a3c8bfbced18a7647f5",
-        //         "port": 5432,
-        //         "server": "postgres",
-        //         "username": "dbuser"
-        //     }
-        // ],
-        // "sqltools.autoConnectTo": [
-        //     "postgres"
-        // ],
-        // // only use pg_format to actually format!
-        // "sqltools.formatLanguages": [],
-        // "sqltools.telemetry": false,
-        // "sqltools.autoOpenSessionFiles": false
+            // Add the IDs of extensions you want installed when the container is created.
+            "extensions": [
+                // required:
+                "golang.go",
+                "bradymholt.pgformatter",
+                // optional:
+                "42crunch.vscode-openapi",
+                "heaths.vscode-guid",
+                "bungcip.better-toml",
+                "eamodio.gitlens",
+                "casualjim.gotemplate",
+                "yzhang.markdown-all-in-one"
+            ]
+        }
     },
-    // Add the IDs of extensions you want installed when the container is created.
-    "extensions": [
-        // required:
-        "ms-vscode.go",
-        "bradymholt.pgformatter",
-        // optional:
-        // "766b.go-outliner",
-        "heaths.vscode-guid",
-        "bungcip.better-toml",
-        "eamodio.gitlens",
-        "casualjim.gotemplate"
-        // "mtxr.sqltools",
-    ]
     // Uncomment the next line if you want to start specific services in your Docker Compose config.
     // "runServices": [],
     // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
     // "shutdownAction": "none",
     // Uncomment the next line to run commands after the container is created - for example installing git.
+    "postCreateCommand": "go version",
     // "postCreateCommand": "apt-get update && apt-get install -y git",
     // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root.
-    // "remoteUser": "vscode"
+    // "remoteUser": ""
 }
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
index 21c5356..e3c57a2 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -5,4 +5,5 @@
 .tools-versions
 Dockerfile
 docker-compose.*
-docker-helper.sh
\ No newline at end of file
+docker-helper.sh
+docs
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000..ec24175
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,357 @@
+# -----------------------------------------------------------------------------
+# SETTINGS
+# -----------------------------------------------------------------------------
+
+# Drone matrix: Additional ENV vars for substitution - http://docs.drone.io/matrix-builds/
+# Will be evaluated BEFORE the YAML is parsed, ONLY strings allowed, NO substitutions ${XXX} here.
+
+matrix:
+  include:
+    - BUILD_ENV: all
+      # The name of the k8s namespaces that these pipelines will target.
+      # K8S_DEPLOY_NS_DEV:
+      # K8S_DEPLOY_NS_STAGING:
+      # K8S_DEPLOY_NS_PRODUCTION:
+
+# YAML Configuration anchors - https://learnxinyminutes.com/docs/yaml/
+# Will be evaluated WHILE the YAML is parsed, any valid yaml allowed, substitutions ${XXX} allowed.
+
+alias:
+  # The image will be tagged with this, pushed to gcr and referenced with this key in the k8s deployment
+  - &IMAGE_DEPLOY_TAG ${DRONE_COMMIT_SHA}
+
+  # The image name, defaults to lowercase repo name <owner>/<name>, e.g. aw/aaa-cab-kubernetes-test
+  - &IMAGE_DEPLOY_NAME ${DRONE_REPO,,}
+
+  # The intermediate builder image name
+  - &IMAGE_BUILDER_ID ${DRONE_REPO,,}-builder:${DRONE_COMMIT_SHA}
+
+  # The full uniquely tagged app image name
+  - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA}
+
+  # Defines which branches will trigger a docker image push to our Google Cloud Registry (tags are always published)
+  - &GCR_PUBLISH_BRANCHES [dev, master, aj/pooling-improvements, mr/aj-review]
+
+  # Docker registry publish default settings
+  - &GCR_REGISTRY_SETTINGS
+    image: plugins/gcr
+    repo: a3cloud-192413/${DRONE_REPO,,}
+    registry: eu.gcr.io
+    secrets:
+      - source: AAA_GCR_SERVICE_ACCOUNT_JSON
+        target: google_credentials
+    # local short-time-cache: don't cleanup any image layers after pushing
+    purge: false
+    # force compress of docker build context
+    compress: true
+    volumes: # mount needed to push the already built container
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  # # Deployment default settings
+  # - &K8S_DEPLOY_SETTINGS
+  #   image: eu.gcr.io/a3cloud-192413/aw/aaa-drone-kubernetes:latest
+  #   pull: true
+  #   secrets:
+  #     - source: AAA_K8S_SERVER
+  #       target: KUBERNETES_SERVER
+  #     - source: AAA_K8S_SERVICE_ACCOUNT_CRT
+  #       target: KUBERNETES_CERT
+  #     - source: AAA_K8S_SERVICE_ACCOUNT_TOKEN
+  #       target: KUBERNETES_TOKEN
+  #     - source: AAA_GCR_SERVICE_ACCOUNT_JSON
+  #       target: GCR_SERVICE_ACCOUNT
+  #   deployment: app
+  #   repo: eu.gcr.io/a3cloud-192413/${DRONE_REPO,,}
+  #   container: [app]
+  #   tag: *IMAGE_DEPLOY_TAG
+  #   gcr_service_account_email: drone-ci-a3cloud@a3cloud-192413.iam.gserviceaccount.com
+  #   mgmt_repo: https://git.allaboutapps.at/scm/aw/a3cloud-mgmt.git
+  #   mgmt_git_email: infrastructure+drone@allaboutapps.at
+
+  # ENV variables for executing within the test env (similar to the env in docker-compose.yml)
+  - &TEST_ENV
+    CI: ${CI}
+
+    # required: env for main working database, service
+    # default for sql-migrate (target development) and psql cli tool
+    PGDATABASE: &PGDATABASE "development"
+    PGUSER: &PGUSER "dbuser"
+    PGPASSWORD: &PGPASSWORD "dbpass"
+    PGHOST: &PGHOST "postgres"
+    PGPORT: &PGPORT "5432"
+    PGSSLMODE: &PGSSLMODE "disable"
+
+    # optional: env for sql-boiler (ability to generate models out of a "spec" database)
+    # sql-boiler should operate on a "spec" database only
+    PSQL_DBNAME: "spec"
+    PSQL_USER: *PGUSER
+    PSQL_PASS: *PGPASSWORD
+    PSQL_HOST: *PGHOST
+    PSQL_PORT: *PGPORT
+    PSQL_SSLMODE: *PGSSLMODE
+
+    # required for drone: project root directory, used for relative path resolution (e.g. fixtures)
+    PROJECT_ROOT_DIR: /app
+
+    # docker run related.
+    SERVER_MANAGEMENT_SECRET: "mgmt-secret"
+
+  # Which build events should trigger the main pipeline (defaults to all)
+  - &BUILD_EVENTS [push, pull_request, tag]
+
+  # Pipeline merge helper: only execute if build event received
+  - &WHEN_BUILD_EVENT
+    when:
+      event: *BUILD_EVENTS
+
+# The actual pipeline building our product
+pipeline:
+  # ---------------------------------------------------------------------------
+  # BUILD
+  # ---------------------------------------------------------------------------
+
+  "database connection":
+    group: build
+    image: postgres:12.4-alpine # should be the same version as used in .drone.yml, .github/workflows, Dockerfile and live
+    commands:
+      # wait for postgres service to become available
+      - |
+        until psql -U $PGUSER -d $PGDATABASE -h postgres \
+          -c "SELECT 1;" >/dev/null 2>&1; do sleep 1; done
+      # query the database
+      - |
+        psql -U $PGUSER -d $PGDATABASE -h postgres \
+          -c "SELECT name, setting FROM pg_settings;"
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  "docker build (target builder)":
+    group: build
+    image: docker:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      IMAGE_TAG: *IMAGE_BUILDER_ID
+    commands:
+      - "docker build --target builder --compress -t $${IMAGE_TAG} ."
+    <<: *WHEN_BUILD_EVENT
+
+  "docker build (target integresql)":
+    group: build-app
+    image: docker:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      IMAGE_TAG: *IMAGE_DEPLOY_ID
+    commands:
+      - "docker build --target integresql --compress -t $${IMAGE_TAG} ."
+    <<: *WHEN_BUILD_EVENT
+
+  # ---------------------------------------------------------------------------
+  # CHECK
+  # ---------------------------------------------------------------------------
+
+  "trivy scan":
+    group: pre-test
+    image: aquasec/trivy:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /server/drone/trivy-cache:/root/.cache/
+    environment:
+      IMAGE_TAG: *IMAGE_DEPLOY_ID
+    commands:
+      # Print report
+      - "trivy image --exit-code 0 --no-progress $${IMAGE_TAG}"
+      # Fail on severity HIGH and CRITICAL
+      - "trivy image --exit-code 1 --severity HIGH,CRITICAL --no-progress --ignore-unfixed $${IMAGE_TAG}"
+    <<: *WHEN_BUILD_EVENT
+
+  "build & diff":
+    group: pre-test
+    image: *IMAGE_BUILDER_ID
+    commands:
+      - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage
+      - make tidy
+      - make build
+      - /bin/cp -Rf $PROJECT_ROOT_DIR/* $DRONE_WORKSPACE # switch back to drone workspace ...
+      - cd $DRONE_WORKSPACE
+      - "git diff --exit-code" # ... for git diffing (otherwise not possible as .git is .dockerignored)
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  "info":
+    group: test
+    image: *IMAGE_BUILDER_ID
+    commands:
+      - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage
+      - make info
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  "test":
+    group: test
+    image: *IMAGE_BUILDER_ID
+    commands:
+      - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage
+      - make test
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  # "swagger-codegen-cli":
+  #   group: test
+  #   # https://github.com/swagger-api/swagger-codegen/blob/master/modules/swagger-codegen-cli/Dockerfile
+  #   image: swaggerapi/swagger-codegen-cli
+  #   commands:
+  #     # run the main swagger.yml validation.
+  #     - "java -jar /opt/swagger-codegen-cli/swagger-codegen-cli.jar validate -i ./api/swagger.yml"
+  #   <<: *WHEN_BUILD_EVENT
+
+  "binary: deps":
+    group: test
+    image: *IMAGE_BUILDER_ID
+    commands:
+      - cd $PROJECT_ROOT_DIR
+      - make get-embedded-modules-count
+      - make get-embedded-modules
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  "binary: licenses":
+    group: test
+    image: *IMAGE_BUILDER_ID
+    commands:
+      - cd $PROJECT_ROOT_DIR
+      - make get-licenses
+    environment: *TEST_ENV
+    <<: *WHEN_BUILD_EVENT
+
+  # "docker run (target app)":
+  #   group: test
+  #   image: docker:latest
+  #   volumes:
+  #     - /var/run/docker.sock:/var/run/docker.sock
+  #   environment:
+  #     <<: *TEST_ENV
+  #     IMAGE_TAG: *IMAGE_DEPLOY_ID
+  #   commands:
+  #     # Note: NO network related tests are possible here, dnd can just
+  #     # run sibling containers. We have no possibility to connect them
+  #     # into the drone user defined per build docker network!
+  #     # https://github.com/drone-plugins/drone-docker/issues/193
+  #     # https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/
+  #     - (env | grep "^\S*=" | grep -v -e "DRONE=" -e "DRONE_" -e "CI_" -e "CI=" -e "HOME=" -e "HOSTNAME=" -e "SHELL=" -e "PWD=" -e "PATH=") > .hostenv
+  #     - cat .hostenv
+  #     - "docker run --env-file .hostenv $${IMAGE_TAG} help"
+  #     - "docker run --env-file .hostenv $${IMAGE_TAG} -v"
+  #     - "docker run --env-file .hostenv $${IMAGE_TAG} env"
+  #   <<: *WHEN_BUILD_EVENT
+
+  # ---------------------------------------------------------------------------
+  # PUBLISH
+  # ---------------------------------------------------------------------------
+
+  # Built an allowed branch? Push to cloud registry
+  "publish ${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}":
+    group: publish
+    <<: *GCR_REGISTRY_SETTINGS
+    tags:
+      - build_${DRONE_BUILD_NUMBER}
+      - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10}
+      - *IMAGE_DEPLOY_TAG
+      - latest
+      - ${DRONE_BRANCH/\//-}
+      - '${DRONE_COMMIT_SHA:0:10}'
+    when:
+      branch: *GCR_PUBLISH_BRANCHES
+      event: *BUILD_EVENTS
+
+  # Built a tag? Push to cloud registry
+  "publish tag_${DRONE_COMMIT_SHA:0:10}":
+    group: publish
+    <<: *GCR_REGISTRY_SETTINGS
+    tags:
+      - build_${DRONE_BUILD_NUMBER}
+      - tag_${DRONE_COMMIT_SHA:0:10}
+      - *IMAGE_DEPLOY_TAG
+      - latest
+      - ${DRONE_TAG}
+      - ${DRONE_COMMIT_SHA:0:10}
+    when:
+      event: tag
+
+  # ---------------------------------------------------------------------------
+  # DEPLOYMENT
+  # ---------------------------------------------------------------------------
+
+  # # autodeploy dev if it hits the branch
+  # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_DEV} (auto)":
+  #   <<: *K8S_DEPLOY_SETTINGS
+  #   namespace: ${K8S_DEPLOY_NS_DEV}
+  #   mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_DEV}/app.deployment.yaml
+  #   when:
+  #     event: *BUILD_EVENTS
+  #     branch: [dev]
+
+  # # promote dev through "drone deploy dev"
+  # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_DEV} (promote)":
+  #   <<: *K8S_DEPLOY_SETTINGS
+  #   namespace: ${K8S_DEPLOY_NS_DEV}
+  #   mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_DEV}/app.deployment.yaml
+  #   when:
+  #     environment: dev
+  #     event: deployment
+
+  # # autodeploy staging if it hits the branch
+  # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_STAGING} (auto)":
+  #   <<: *K8S_DEPLOY_SETTINGS
+  #   namespace: ${K8S_DEPLOY_NS_STAGING}
+  #   mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_STAGING}/app.deployment.yaml
+  #   when:
+  #     event: *BUILD_EVENTS
+  #     branch: [staging]
+
+  # # promote staging through "drone deploy staging"
+  # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_STAGING} (promote)":
+  #   <<: *K8S_DEPLOY_SETTINGS
+  #   namespace: ${K8S_DEPLOY_NS_STAGING}
+  #   mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_STAGING}/app.deployment.yaml
+  #   when:
+  #     environment: staging
+  #     event: deployment
+
+  # # promote production through "drone deploy production"
+  # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_PRODUCTION} (promote)":
+  #   <<: *K8S_DEPLOY_SETTINGS
+  #   namespace: ${K8S_DEPLOY_NS_PRODUCTION}
+  #   mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_PRODUCTION}/app.deployment.yaml
+  #   when:
+  #     environment: production
+  #     event: deployment
+
+# Long living services where the startup order does not matter (otherwise use detach: true)
+services:
+  # ---------------------------------------------------------------------------
+  # SERVICES
+  # ---------------------------------------------------------------------------
+
+  "env":
+    image: alpine
+    commands:
+      - "env | sort"
+
+  "postgres":
+    image: postgres:12.4-alpine # should be the same version as used in .drone.yml, .github/workflows, Dockerfile and live
+    environment:
+      POSTGRES_DB: *PGDATABASE
+      POSTGRES_USER: *PGUSER
+      POSTGRES_PASSWORD: *PGPASSWORD
+    # ATTENTION
+    # fsync=off, synchronous_commit=off and full_page_writes=off
+    # gives us a major speed up during local development and testing (~30%),
+    # however you should NEVER use these settings in PRODUCTION unless
+    # you want to have CORRUPTED data.
+    # DO NOT COPY/PASTE THIS BLINDLY.
+    # YOU HAVE BEEN WARNED.
+    # Apply some performance improvements to pg as these guarantees are not needed while running integration tests
+    command: "-c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'"
+    <<: *WHEN_BUILD_EVENT
diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml
new file mode 100644
index 0000000..febf3a2
--- /dev/null
+++ b/.github/workflows/build-publish.yml
@@ -0,0 +1,53 @@
+# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images
+name: Build and push image
+
+on:
+  push:
+    tags: "**"
+    branches: "**"
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+
+  build-and-push-image:
+
+    name: Build and push image
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      - name: Build and Push Image
+        uses: docker/build-push-action@v3
+        with:
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          push: true
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 6ec4c1c..24c54ca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,6 @@ bin
 
 # local go mod cache
 .pkg
+
+# temporary files
+tmp
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..af28163
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,29 @@
+linters:
+  enable:
+    # https://github.com/golangci/golangci-lint#enabled-by-default-linters
+    # Additional linters you want to activate may be specified here...
+
+    # ---
+    # https://github.com/mgechev/revive
+    # replacement for the now deprecated official golint linter, see https://github.com/golang/go/issues/38968
+    - revive
+
+    # ---
+    # https://github.com/maratori/testpackage
+    # used to enforce blackbox testing
+    - testpackage
+
+    # ---
+    # https://github.com/securego/gosec
+    # inspects source code for security problems by scanning the Go AST.
+    - gosec
+
+    # ---
+    # https://github.com/sivchari/tenv
+    # prefer t.Setenv instead of os.Setenv within test code.
+    - tenv
+
+    # ---
+    # https://github.com/polyfloyd/go-errorlint
+    # ensure we are comparing errors via errors.Is, types/values via errors.As and wrap errors with %w.
+    - errorlint
diff --git a/.trivyignore b/.trivyignore
new file mode 100644
index 0000000..e69de29
diff --git a/.vscode/launch.json b/.vscode/launch.json
index c710274..14667b8 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -9,9 +9,9 @@
             "type": "go",
             "request": "launch",
             "mode": "auto",
-            "program": "${fileDirname}/cmd/server",
+            "program": "${workspaceFolder}/cmd/server",
             "env": {},
-            "args": []
+            "args": [],
         }
     ]
 }
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
index a58e5bf..6104e5c 100644
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -11,6 +11,6 @@
                 "kind": "build",
                 "isDefault": true
             }
-        }
+        },
     ]
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..2ad908c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,135 @@
+# Changelog
+
+- [Changelog](#changelog)
+  - [Structure](#structure)
+  - [Unreleased](#unreleased)
+  - [v1.1.0](#v110)
+    - [General](#general)
+    - [Known issues](#known-issues)
+    - [Added](#added)
+    - [Changed](#changed)
+    - [Environment Variables](#environment-variables)
+      - [Manager/Pool-related](#managerpool-related)
+      - [Server-related](#server-related)
+  - [v1.0.0](#v100)
+
+
+## Structure
+
+- All notable changes to this project will be documented in this file.
+- The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+- We try to follow [semantic versioning](https://semver.org/).
+- All changes have a **git tag** available, and are built and published to GitHub Packages as a Docker image.
+
+## Unreleased
+
+## v1.1.0
+
+> Special thanks to [Anna - @anjankow](https://github.com/anjankow) for her contributions to this release!
+
+### General
+- Major refactor of the pool manager, while the API should still be backwards-compatible. There should not be any breaking changes when it comes to using the client libraries.
+- The main goal of this release is to bring IntegreSQL's performance on par with our previous native Node.js implementation.
+  - Specifically, we wanted to eliminate some long-running mutex locks (especially when the pool hits the configured pool limit) and make the codebase more maintainable and easier to extend in the future.
+  - While the above should already be visible in CI environments, the subjective performance gain while developing locally could be even bigger.
+
+### Known issues
+- We still have no mechanism to limit the global (cross-pool) number of test-databases.
+  - This is especially problematic if you have many pools running at the same time.
+  - This could lead to situations where the pool manager is unable to create a new test-database because a limit (e.g. disk size) is reached, even though some pools/test-databases will probably never be used again.
+  - This is a **known issue** and will be addressed in a future release.
+- OpenAPI/Swagger API documentation is still missing, we are working on it.
+
+### Added
+- GitHub Packages
+  - Going forward, images are built via GitHub Actions and published to GitHub Packages.
+- ARM Docker images
+  - arm64 is now supported (Apple Silicon M1/M2/M3), we publish a multi-arch image (`linux/amd64,linux/arm64`).
+  - Closes [#15](https://github.com/allaboutapps/integresql/issues/15)
+- We added the `POST /api/v1/templates/:hash/tests/:id/recreate` endpoint to the API.
+  - You can use it to express that you are no longer using this database and that it can be recreated and returned to the pool.
+  - Using this endpoint means you want to break out of our FIFO (first in, first out) recreation queue and get your test-database recreated as soon as possible.
+  - Explicitly calling recreate is **optional**, of course!
+  - Closes [#2](https://github.com/allaboutapps/integresql/issues/2)
+- Minor: Added woodpecker/drone setup (internal allaboutapps CI/CD)
+
+### Changed
+- Redesigned database pool logic and template management
+  - Reimplemented the pool and template logic, separated DB template management from the test DB pool, and added per-pool workers for preparing test DBs in the background.
+- Soft-deprecated the `DELETE /api/v1/templates/:hash/tests/:id` endpoint in favor of `POST /api/v1/templates/:hash/tests/:id/unlock`.
+  - We did a bad job describing the previous functionality of this endpoint: it really only deletes the lock, so the exact same test-database can be used again.
+  - The new `POST /api/v1/templates/:hash/tests/:id/recreate` vs. `POST /api/v1/templates/:hash/tests/:id/unlock` endpoint naming is way more explicit about what it does.
+  - Closes [#13](https://github.com/allaboutapps/integresql/issues/13)
+- Logging and debugging improvements
+  - Introduced zerolog for better logging in the pool and manager modules. Debug statements were refined, and unnecessary print debugging was disabled.
+- Changed details around installing locally in README.md (still not recommended, use the Docker image instead), closes [#7](https://github.com/allaboutapps/integresql/issues/7)
+- Fixed documentation / READMEs, especially providing integration diagrams and details on the project architecture
+  - Closes [#5](https://github.com/allaboutapps/integresql/issues/5)
+
+### Environment Variables
+
+There have been quite a few additions and changes, thus we have the in-depth details here.
+
+#### Manager/Pool-related
+
+- Changed `INTEGRESQL_TEST_MAX_POOL_SIZE`:
+  - Maximal pool size that won't be exceeded
+  - Defaults to four times your number of CPU cores, [`runtime.NumCPU()*4`](https://pkg.go.dev/runtime#NumCPU)
+  - Previous default was `500` (hardcoded)
+  - This might be a **significant change** for some use cases, please adjust accordingly. The pool recreation logic is now much faster, there is typically no need to have such a high limit of test-databases **per pool**!
+- Changed `INTEGRESQL_TEST_INITIAL_POOL_SIZE`:
+  - Initial number of ready DBs prepared in the background. The pool is configured to always try to have this number of ready DBs available (it actively tries to recreate databases within the pool in a FIFO manner).
+  - Defaults to [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU)
+  - Previous default was `10` (hardcoded)
+- Added `INTEGRESQL_POOL_MAX_PARALLEL_TASKS`:
+  - Maximal number of pool tasks running in parallel. Must be a number greater than or equal to 1.
+  - Defaults to [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU)
+- Added `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS`:
+  - Minimal time to wait after a test DB recreate has failed (e.g. because a client is still connected). Subsequent retries multiply this value until the maximum (below) is reached.
+  - Defaults to `250`ms
+- Added `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS`:
+  - The maximum possible sleep time between recreation retries (e.g. 3 seconds), see above.
+  - Defaults to `3000`ms
+- Added `INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS`:
+  - After a test-database transitions from `ready` to `dirty`, always block auto-recreation (FIFO) for this duration (except when `POST /api/v1/templates/:hash/tests/:id/recreate` was called manually).
+  - Defaults to `250`ms
+- Added `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS`:
+  - Internal time to wait for a template-database to transition into the 'finalized' state
+  - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS`)
+- Added `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS`:
+  - Internal time to wait for a ready database (requested via `/api/v1/templates/:hash/tests`)
+  - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS`)
+  - Previous default was `10` (hardcoded)
+
+#### Server-related
+
+- Added `INTEGRESQL_DEBUG_ENDPOINTS`
+  - Enables [pprof debug endpoints](https://golang.org/pkg/net/http/pprof/) under `/debug/*`
+  - Defaults to `false`
+- Added `INTEGRESQL_ECHO_DEBUG`
+  - Enables [echo framework debug mode](https://echo.labstack.com/docs/customization)
+  - Defaults to `false`
+- Added middlewares, all default to `true`
+  - `INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE`: [enables CORS](https://echo.labstack.com/docs/middleware/cors)
+  - `INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE`: [enables logger](https://echo.labstack.com/docs/middleware/logger)
+  - `INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE`: [enables recover](https://echo.labstack.com/docs/middleware/recover)
+  - `INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE`: [sets request_id to context](https://echo.labstack.com/docs/middleware/request-id)
+  - `INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE`: [auto-adds trailing slash](https://echo.labstack.com/docs/middleware/trailing-slash)
+  - `INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE`: [enables timeout middleware](https://echo.labstack.com/docs/middleware/timeout)
+- Added `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS`
+  - Generic timeout handling for most endpoints.
+  - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS` and `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS`)
+- Added `INTEGRESQL_LOGGER_LEVEL`
+  - Defaults to `info`
+- Added `INTEGRESQL_LOGGER_REQUEST_LEVEL`
+  - Defaults to `info`
+- Added the following logging settings, which all default to `false`
+  - `INTEGRESQL_LOGGER_LOG_REQUEST_BODY`: Should the request-log include the body?
+  - `INTEGRESQL_LOGGER_LOG_REQUEST_HEADER`: Should the request-log include headers?
+  - `INTEGRESQL_LOGGER_LOG_REQUEST_QUERY`: Should the request-log include the query?
+  - `INTEGRESQL_LOGGER_LOG_RESPONSE_BODY`: Should the request-log include the response body?
+  - `INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER`: Should the request-log include the response header?
+  - `INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE`: Should the console logger pretty-print the log (instead of JSON)?
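+
+As a rough illustration of how the CPU-based defaults above can be resolved from the environment, consider the following Go sketch. This is **not** IntegreSQL's actual config code, and `envInt` is a hypothetical helper shown only for clarity:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+)
+
+// envInt is a hypothetical helper: it returns the int value of an env var,
+// or the given default if the var is unset or not a valid integer.
+func envInt(key string, def int) int {
+	if v, err := strconv.Atoi(os.Getenv(key)); err == nil {
+		return v
+	}
+	return def
+}
+
+func main() {
+	// Defaults as documented above (v1.1.0).
+	maxPoolSize := envInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4)
+	initialPoolSize := envInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU())
+	maxParallelTasks := envInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU())
+
+	fmt.Println(maxPoolSize, initialPoolSize, maxParallelTasks)
+}
+```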
+
+## v1.0.0
+- Initial release May 2020
diff --git a/Dockerfile b/Dockerfile
index 0b96f57..b0907ca 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,60 +1,177 @@
-FROM golang:1.14.2 AS development
+### -----------------------
+# --- Stage: development
+# --- Purpose: Local development environment
+# --- https://hub.docker.com/_/golang
+# --- https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile
+### -----------------------
+FROM golang:1.21.6-bullseye AS development
 
-# https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through
-ENV GOBIN /app/bin
-ENV PATH $GOBIN:$PATH
+# Avoid warnings by switching to noninteractive
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Our Makefile / env fully supports parallel job execution
+ENV MAKEFLAGS "-j 8 --no-print-directory"
 
 # postgresql-support: Add the official postgres repo to install the matching postgresql-client tools of your stack
-# see https://wiki.postgresql.org/wiki/Apt
+# https://wiki.postgresql.org/wiki/Apt
 # run lsb_release -c inside the container to pick the proper repository flavor
-# e.g. stretch=>stretch-pgdg, buster=>buster-pgdg
-RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" \
+# e.g. stretch=>stretch-pgdg, buster=>buster-pgdg, bullseye=>bullseye-pgdg
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ bullseye-pgdg main" \
     | tee /etc/apt/sources.list.d/pgdg.list \
     && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc \
     | apt-key add -
 
 # Install required system dependencies
 RUN apt-get update \
-    && apt-get install -y --no-install-recommends \
+    && apt-get install -y \
+    #
+    # Mandatory minimal linux packages
+    # Installed at development stage and app stage
+    # Do not forget to add mandatory linux packages to the final app Dockerfile stage below!
+    #
+    # -- START MANDATORY --
+    ca-certificates \
+    # --- END MANDATORY ---
+    #
+    # Development specific packages
+    # Only installed at development stage and NOT available in the final Docker stage
+    # based upon
+    # https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile
+    # https://raw.githubusercontent.com/microsoft/vscode-dev-containers/master/script-library/common-debian.sh
+    #
+    # icu-devtools: https://stackoverflow.com/questions/58736399/how-to-get-vscode-liveshare-extension-working-when-running-inside-vscode-remote
+    # graphviz: https://github.com/google/pprof#building-pprof
+    # -- START DEVELOPMENT --
+    apt-utils \
+    dialog \
+    openssh-client \
+    less \
+    iproute2 \
+    procps \
+    lsb-release \
     locales \
+    sudo \
+    bash-completion \
+    bsdmainutils \
+    graphviz \
+    xz-utils \
     postgresql-client-12 \
+    icu-devtools \
+    tmux \
+    rsync \
+    # --- END DEVELOPMENT ---
+    #
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# vscode support: LANG must be supported, requires installing the locale package first
-# see https://github.com/Microsoft/vscode/issues/58015
+# env/vscode support: LANG must be supported, requires installing the locale package first
+# https://github.com/Microsoft/vscode/issues/58015
+# https://stackoverflow.com/questions/28405902/how-to-set-the-locale-inside-a-debian-ubuntu-docker-container
 RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
     dpkg-reconfigure --frontend=noninteractive locales && \
     update-locale LANG=en_US.UTF-8
 ENV LANG en_US.UTF-8
 
-# sql-formatting: Install the same version of pg_formatter as used in your editors, as of 2020-03 that's v4.2
-# https://github.com/darold/pgFormatter/releases
+# sql pgFormatter: Integrates with vscode-pgFormatter (we pin pgFormatter.pgFormatterPath for the extension to this version)
+# requires perl to be installed
 # https://github.com/bradymholt/vscode-pgFormatter/commits/master
-RUN wget https://github.com/darold/pgFormatter/archive/v4.2.tar.gz \
-    && tar xzf v4.2.tar.gz \
-    && cd pgFormatter-4.2 \
+# https://github.com/darold/pgFormatter/releases
+RUN mkdir -p /tmp/pgFormatter \
+    && cd /tmp/pgFormatter \
+    && wget https://github.com/darold/pgFormatter/archive/v5.5.tar.gz \
+    && tar xzf v5.5.tar.gz \
+    && cd pgFormatter-5.5 \
    && perl Makefile.PL \
-    && make && make install
+    && make && make install \
+    && rm -rf /tmp/pgFormatter
 
-# go richgo: (this package should NOT be installed via go get)
-# https://github.com/kyoh86/richgo/releases
-RUN wget https://github.com/kyoh86/richgo/releases/download/v0.3.3/richgo_0.3.3_linux_amd64.tar.gz \
-    && tar xzf richgo_0.3.3_linux_amd64.tar.gz \
-    && cp richgo /usr/local/bin/richgo
+# go gotestsum: (this package should NOT be installed via go get)
+# https://github.com/gotestyourself/gotestsum/releases
+RUN mkdir -p /tmp/gotestsum \
+    && cd /tmp/gotestsum \
+    && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \
+    && wget "https://github.com/gotestyourself/gotestsum/releases/download/v1.11.0/gotestsum_1.11.0_linux_${ARCH}.tar.gz" \
+    && tar xzf "gotestsum_1.11.0_linux_${ARCH}.tar.gz" \
+    && cp gotestsum /usr/local/bin/gotestsum \
+    && rm -rf /tmp/gotestsum
 
 # go linting: (this package should NOT be installed via go get)
 # https://github.com/golangci/golangci-lint#binary
+# https://github.com/golangci/golangci-lint/releases
 RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \
-    | sh -s -- -b $(go env GOPATH)/bin v1.24.0
+    | sh -s -- -b $(go env GOPATH)/bin v1.55.2
 
-# go swagger: (this package should NOT be installed via go get)
+# go swagger: (this package should NOT be installed via go get)
 # https://github.com/go-swagger/go-swagger/releases
-RUN curl -o /usr/local/bin/swagger -L'#' \
-    "https://github.com/go-swagger/go-swagger/releases/download/v0.23.0/swagger_linux_amd64" \
+RUN ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \
+    && curl -o /usr/local/bin/swagger -L'#' \
+    "https://github.com/go-swagger/go-swagger/releases/download/v0.29.0/swagger_linux_${ARCH}" \
     && chmod +x /usr/local/bin/swagger
 
+# lichen: go license util
+# TODO: Install from static binary as soon as it becomes available.
+# https://github.com/uw-labs/lichen/tags
+RUN go install github.com/uw-labs/lichen@v0.1.7
+
+# watchexec
+# https://github.com/watchexec/watchexec/releases
+RUN mkdir -p /tmp/watchexec \
+    && cd /tmp/watchexec \
+    && wget https://github.com/watchexec/watchexec/releases/download/v1.20.6/watchexec-1.20.6-$(arch)-unknown-linux-musl.tar.xz \
+    && tar xf watchexec-1.20.6-$(arch)-unknown-linux-musl.tar.xz \
+    && cp watchexec-1.20.6-$(arch)-unknown-linux-musl/watchexec /usr/local/bin/watchexec \
+    && rm -rf /tmp/watchexec
+
+# yq
+# https://github.com/mikefarah/yq/releases
+RUN mkdir -p /tmp/yq \
+    && cd /tmp/yq \
+    && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \
+    && wget "https://github.com/mikefarah/yq/releases/download/v4.40.5/yq_linux_${ARCH}.tar.gz" \
+    && tar xzf "yq_linux_${ARCH}.tar.gz" \
+    && cp "yq_linux_${ARCH}" /usr/local/bin/yq \
+    && rm -rf /tmp/yq
+
+# linux permissions / vscode support: Add user to avoid linux file permission issues
+# Detail: Inside the container, any mounted files/folders will have the exact same permissions
+# as outside the container - including the owner user ID (UID) and group ID (GID).
+# Because of this, your container user will either need to have the same UID or be in a group with the same GID.
+# The actual name of the user / group does not matter. The first user on a machine typically gets a UID of 1000,
+# so most containers use this as the ID of the user to try to avoid this problem.
+# 2020-04: docker-compose does not support passing id -u / id -g as part of its config, therefore we assume uid 1000
+# https://code.visualstudio.com/docs/remote/containers-advanced#_adding-a-nonroot-user-to-your-dev-container
+# https://code.visualstudio.com/docs/remote/containers-advanced#_creating-a-nonroot-user
+ARG USERNAME=development
+ARG USER_UID=1000
+ARG USER_GID=$USER_UID
+
+RUN groupadd --gid $USER_GID $USERNAME \
+    && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \
+    && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
+    && chmod 0440 /etc/sudoers.d/$USERNAME
+
+# vscode support: cached extensions install directory
+# https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild
+RUN mkdir -p /home/$USERNAME/.vscode-server/extensions \
+    /home/$USERNAME/.vscode-server-insiders/extensions \
+    && chown -R $USERNAME \
+    /home/$USERNAME/.vscode-server \
+    /home/$USERNAME/.vscode-server-insiders
+
+# linux permissions / vscode support: chown $GOPATH so $USERNAME can directly work with it
+# Note that this should be the final step after installing all build deps
+RUN mkdir -p /$GOPATH/pkg && chown -R $USERNAME /$GOPATH
+
+# $GOBIN is where our own compiled binaries will live and other go.mod / VSCode binaries will be installed.
+# It should always come AFTER our other $PATH segments and should be earliest targeted in stage "builder",
+# as /app/bin will be shadowed by a volume mount via docker-compose!
+# E.g. "which golangci-lint" should report "/go/bin" not "/app/bin" (where VSCode will place it).
+# https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through
+WORKDIR /app
+ENV GOBIN /app/bin
+ENV PATH $PATH:$GOBIN
+
 ### -----------------------
 # --- Stage: builder
 ### -----------------------
@@ -65,24 +182,25 @@
 COPY Makefile /app/Makefile
 COPY go.mod /app/go.mod
 COPY go.sum /app/go.sum
 COPY tools.go /app/tools.go
-RUN make modules && make tools
+RUN make modules
+COPY tools.go /app/tools.go
+RUN make tools
 COPY . /app/
-
-### -----------------------
-# --- Stage: builder-integresql
-### -----------------------
-
-FROM builder as builder-integresql
-RUN make build
+RUN make go-build
 
 ### -----------------------
 # --- Stage: integresql
 ### -----------------------
 
-# https://github.com/GoogleContainerTools/distroless
-FROM gcr.io/distroless/base as integresql
-COPY --from=builder-integresql /app/bin/integresql /
+# Distroless images are minimal and lack shell access.
+# https://github.com/GoogleContainerTools/distroless/blob/master/base/README.md
+# The :debug image provides a busybox shell to enter.
+# https://github.com/GoogleContainerTools/distroless#debug-images
+FROM gcr.io/distroless/base-debian11:debug as integresql
+COPY --from=builder /app/bin/integresql /
 
 # Note that cmd is not supported with these kind of images, no shell included
 # see https://github.com/GoogleContainerTools/distroless/issues/62
 # and https://github.com/GoogleContainerTools/distroless#entrypoints
-ENTRYPOINT [ "/integresql" ]
\ No newline at end of file
+ENTRYPOINT [ "/integresql" ]
+
+EXPOSE 5000
diff --git a/Makefile b/Makefile
index aa3b0ea..4e4701c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,47 +1,186 @@
-# first is default task when running "make" without args
-build: format gobuild lint
+### -----------------------
+# --- Building
+### -----------------------
 
-format:
-	go fmt
+# first is default target when running "make" without args
+build: ##- Default 'make' target: go-format, go-build and lint.
+	@$(MAKE) go-format
+	@$(MAKE) go-build
+	@$(MAKE) lint
 
-gobuild:
-	go build -o bin/integresql ./cmd/server
+# useful to ensure that everything gets set up again from scratch
+all: clean init ##- Runs all of our common make targets: clean, init, build and test.
+	@$(MAKE) build
+	@$(MAKE) test
 
-lint:
-	golangci-lint run --fast
+info: info-go ##- Prints additional info
 
-# https://github.com/golang/go/issues/24573
-# w/o cache - see "go help testflag"
-# use https://github.com/kyoh86/richgo to color
+info-go: ##- (opt) Prints go.mod updates, module-name and current go version.
+	@echo "[go.mod]" > tmp/.info-go
+	@$(MAKE) get-go-outdated-modules >> tmp/.info-go
+	@$(MAKE) info-module-name >> tmp/.info-go
+	@go version >> tmp/.info-go
+	@cat tmp/.info-go
+
+lint: go-lint ##- Runs golangci-lint and make check-*.
+
+go-format: ##- (opt) Runs go format.
+	go fmt ./...
+
+go-build: ##- (opt) Runs go build.
+	go build -ldflags $(LDFLAGS) -o bin/integresql ./cmd/server
+
+go-lint: ##- (opt) Runs golangci-lint.
+	golangci-lint run --timeout 5m
+
+bench: ##- Run benchmarks (with -race, repeated 4 times).
+	@go test -benchmem=false -run=./... -bench . github.com/allaboutapps/integresql/tests -race -count=4 -v
+
+# https://github.com/gotestyourself/gotestsum#format
+# w/o cache https://github.com/golang/go/issues/24573 - see "go help testflag"
 # note that these tests should not run verbose by default (e.g. use your IDE for this)
 # TODO: add test shuffling/seeding when landed in go v1.15 (https://github.com/golang/go/issues/28592)
-test:
-	richgo test -cover -race -count=1 ./...
+# tests by pkgname
+test: ##- Run tests, output by package, print coverage.
+	@$(MAKE) go-test-by-pkg
+	@$(MAKE) go-test-print-coverage
 
+# note that we explicitly don't want to use a -coverpkg=./... option, per pkg coverage takes precedence
+go-test-by-pkg: ##- (opt) Run tests, output by package.
+	gotestsum --format pkgname-and-test-fails --format-hide-empty-pkg --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./...
+
+go-test-by-name: ##- (opt) Run tests, output by testname.
+	gotestsum --format testname --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./...
+
+go-test-print-coverage: ##- (opt) Print overall test coverage (must be done after running tests).
+	@printf "coverage "
+	@go tool cover -func=/tmp/coverage.out | tail -n 1 | awk '{$$1=$$1;print}'
 
-init: modules tools tidy
+# TODO: switch to "-m direct" after go 1.17 hits: https://github.com/golang/go/issues/40364
+get-go-outdated-modules: ##- (opt) Prints outdated (direct) go modules (from go.mod).
+	@((go list -u -m -f '{{if and .Update (not .Indirect)}}{{.}}{{end}}' all) 2>/dev/null | grep " ") || echo "go modules are up-to-date."
+
+
+### -----------------------
+# --- Initializing
+### -----------------------
+
+init: ##- Runs make modules, tools and tidy.
+	@$(MAKE) modules
+	@$(MAKE) tools
+	@$(MAKE) tidy
 	@go version
 
 # cache go modules (locally into .pkg)
-modules:
+modules: ##- (opt) Cache packages as specified in go.mod.
 	go mod download
 
 # https://marcofranssen.nl/manage-go-tools-via-go-modules/
-tools:
-	cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -tI % go install %
+tools: ##- (opt) Install packages as specified in tools.go.
+	@cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -P $$(nproc) -tI % go install %
 
-tidy:
+tidy: ##- (opt) Tidy our go.sum file.
 	go mod tidy
 
-clean:
-	rm -rf bin
+### -----------------------
+# --- SQL
+### -----------------------
 
-reset:
+reset: ##- Wizard to drop and create our development database.
 	@echo "DROP & CREATE database:"
 	@echo "  PGHOST=${PGHOST} PGDATABASE=${PGDATABASE}" PGUSER=${PGUSER}
 	@echo -n "Are you sure? [y/N] " && read ans && [ $${ans:-N} = y ]
 	psql -d postgres -c 'DROP DATABASE IF EXISTS "${PGDATABASE}";'
 	psql -d postgres -c 'CREATE DATABASE "${PGDATABASE}" WITH OWNER ${PGUSER} TEMPLATE "template0"'
 
+### -----------------------
+# --- Binary checks
+### -----------------------
+
+# Got license issues with some dependencies? Provide a custom lichen --config
+# see https://github.com/uw-labs/lichen#config
+get-licenses: ##- Prints licenses of embedded modules in the compiled bin/integresql.
+	lichen bin/integresql
+
+get-embedded-modules: ##- Prints embedded modules in the compiled bin/integresql.
+	go version -m -v bin/integresql
+
+get-embedded-modules-count: ##- (opt) Prints count of embedded modules in the compiled bin/integresql.
+	go version -m -v bin/integresql | grep $$'\tdep' | wc -l
+
+
+### -----------------------
+# --- Helpers
+### -----------------------
+
+clean: ##- Cleans tmp folders.
+	@echo "make clean"
+	@rm -rf tmp/* 2> /dev/null
+	@rm -rf api/tmp/* 2> /dev/null
+
+get-module-name: ##- Prints current go module-name (pipeable).
+	@echo "${GO_MODULE_NAME}"
+
+info-module-name: ##- (opt) Prints current go module-name.
+	@echo "go module-name: '${GO_MODULE_NAME}'"
+
+get-go-ldflags: ##- (opt) Prints used -ldflags as evaluated in Makefile used in make go-build
+	@echo $(LDFLAGS)
+
+# https://gist.github.com/prwhite/8168133 - based on comment from @m000
+help: ##- Show common make targets.
+	@echo "usage: make <target>"
+	@echo "note: use 'make help-all' to see all make targets."
+	@echo ""
+	@sed -e '/#\{2\}-/!d; s/\\$$//; s/:[^#\t]*/@/; s/#\{2\}- *//' $(MAKEFILE_LIST) | grep --invert "(opt)" | sort | column -t -s '@'
+
+help-all: ##- Show all make targets.
+	@echo "usage: make <target>"
+	@echo "note: make targets flagged with '(opt)' are part of a main target."
+	@echo ""
+	@sed -e '/#\{2\}-/!d; s/\\$$//; s/:[^#\t]*/@/; s/#\{2\}- *//' $(MAKEFILE_LIST) | sort | column -t -s '@'
+
+### -----------------------
+# --- Make variables
+### -----------------------
+
+# go module name (as in go.mod)
+GO_MODULE_NAME = github.com/allaboutapps/integresql
+
+# only evaluated if required by a recipe
+# http://make.mad-scientist.net/deferred-simple-variable-expansion/
+
+# https://medium.com/the-go-journey/adding-version-information-to-go-binaries-e1b79878f6f2
+ARG_COMMIT = $(eval ARG_COMMIT := $$(shell \
+	(git rev-list -1 HEAD 2> /dev/null) \
+	|| (echo "unknown") \
+))$(ARG_COMMIT)
+
+ARG_BUILD_DATE = $(eval ARG_BUILD_DATE := $$(shell \
+	(date -Is 2> /dev/null || date 2> /dev/null || echo "unknown") \
+))$(ARG_BUILD_DATE)
+
+# https://www.digitalocean.com/community/tutorials/using-ldflags-to-set-version-information-for-go-applications
+LDFLAGS = $(eval LDFLAGS := "\
+-X '$(GO_MODULE_NAME)/internal/config.ModuleName=$(GO_MODULE_NAME)'\
+-X '$(GO_MODULE_NAME)/internal/config.Commit=$(ARG_COMMIT)'\
+-X '$(GO_MODULE_NAME)/internal/config.BuildDate=$(ARG_BUILD_DATE)'\
+")$(LDFLAGS)
+
+### -----------------------
+# --- Special targets
+### -----------------------
+
+# https://www.gnu.org/software/make/manual/html_node/Special-Targets.html
 # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
 # ignore matching file/make rule combinations in working-dir
-.PHONY: test
+.PHONY: test help
+
+# https://unix.stackexchange.com/questions/153763/dont-stop-makeing-if-a-command-fails-but-check-exit-status
+# https://www.gnu.org/software/make/manual/html_node/One-Shell.html
+# required to ensure make fails if one recipe fails (even on parallel jobs) and on pipefails
+.ONESHELL:
+
+# normal POSIX bash shell mode
+SHELL = /bin/bash
+.SHELLFLAGS = -cEeuo pipefail
diff --git a/README.md b/README.md
index 6071f42..754b8bd 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,72 @@
+
+
 # IntegreSQL
 
-`IntegreSQL` manages isolated PostgreSQL databases for your integration tests.
+IntegreSQL manages isolated PostgreSQL databases for your integration tests.
 
 Do your engineers a favour by allowing them to write fast executing, parallel and deterministic integration tests utilizing **real** PostgreSQL test databases. Resemble your live environment in tests as closely as possible.
 
-[![](https://img.shields.io/docker/image-size/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://img.shields.io/docker/pulls/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master)
+```mermaid
+sequenceDiagram
+    You->>Testrunner: Start tests
+
+    Testrunner->>IntegreSQL: New template database
+    IntegreSQL->>PostgreSQL: 
+    PostgreSQL-->>IntegreSQL: 
+    IntegreSQL-->>Testrunner: 
+
+    Testrunner->>PostgreSQL: Connect to template database, apply all migrations, seed all fixtures, ..., disconnect.
+    PostgreSQL-->>Testrunner: 
+
+    Testrunner->>IntegreSQL: Finalize the template database
+    IntegreSQL-->>Testrunner: 
+
+    Note over Testrunner,PostgreSQL: Your test runner can now get isolated test databases for this hash from the pool!
+
+    loop Each test
+        Testrunner->>IntegreSQL: Get test database (looks like template database)
+        Testrunner->>PostgreSQL: 
+        Note over Testrunner,PostgreSQL: Run your test code in an isolated test database!
+
+        Testrunner-xPostgreSQL: Disconnect from the test database.
+    end
+```
+
+[![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master)
 
 - [IntegreSQL](#integresql)
+  - [Install](#install)
+  - [Usage](#usage)
+    - [Run using Docker (preferred)](#run-using-docker-preferred)
+    - [Run locally (not recommended)](#run-locally-not-recommended)
+    - [Run within your CI/CD](#run-within-your-cicd)
+      - [GitHub Actions](#github-actions)
+  - [Integrate](#integrate)
+    - [Integrate by client lib](#integrate-by-client-lib)
+    - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls)
+      - [Once per test runner/process](#once-per-test-runnerprocess)
+        - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database)
+        - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database)
+        - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503)
+      - [Per each test](#per-each-test)
+        - [New test database per test](#new-test-database-per-test)
+        - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test)
+        - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database)
+        - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database)
+          - [StatusNotFound 404](#statusnotfound-404)
+          - [StatusGone 410](#statusgone-410)
+          - [StatusServiceUnavailable 503](#statusserviceunavailable-503)
+  - [Demo](#demo)
+  - [Configuration](#configuration)
+  - [Architecture](#architecture)
+    - [TestDatabase states](#testdatabase-states)
+    - [Pool structure](#pool-structure)
   - [Background](#background)
     - [Approach 0: Leaking database mutations for subsequent tests](#approach-0-leaking-database-mutations-for-subsequent-tests)
    - [Approach 1: Isolating by resetting](#approach-1-isolating-by-resetting)
@@ -18,22 +78,477 @@ Do your engineers a favour by allowing them to write fast executing, parallel an
       - [Approach 3c benchmark 1: Baseline](#approach-3c-benchmark-1-baseline)
       - [Approach 3c benchmark 2: Small project](#approach-3c-benchmark-2-small-project)
     - [Final approach: IntegreSQL](#final-approach-integresql)
-  - [Integrate by client lib](#integrate-by-client-lib)
-  - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls)
-  - [Demo](#demo)
-  - [Install](#install)
-    - [Install using Docker (preferred)](#install-using-docker-preferred)
-    - [Install locally](#install-locally)
-  - [Configuration](#configuration)
-  - [Usage](#usage)
-    - [Run using Docker (preferred)](#run-using-docker-preferred)
-    - [Run locally](#run-locally)
+  - [Benchmarks](#benchmarks)
+    - [Benchmark v1.1.0 vs v1.0.0](#benchmark-v110-vs-v100)
  - [Contributing](#contributing)
    - [Development setup](#development-setup)
    - [Development quickstart](#development-quickstart)
  - [Maintainers](#maintainers)
+  - [Previous maintainers](#previous-maintainers)
  - [License](#license)
 
+## Install
+
+A minimal Docker image is published on GitHub Packages. See [GitHub Releases](https://github.com/allaboutapps/integresql/releases).
+
+```bash
+docker pull ghcr.io/allaboutapps/integresql:<TAG>
+```
+
+## Usage
+
+IntegreSQL is a RESTful JSON API distributed as a Docker image and Go CLI. It's language-agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes.
+
+### Run using Docker (preferred)
+
+Simply start a [Docker](https://docs.docker.com/install/) (19.03 or above) container, provide the required environment variables and expose the server port:
+
+```bash
+docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 ghcr.io/allaboutapps/integresql:<TAG>
+```
+
+The container can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above):
+
+```yaml
+version: "3.4"
+services:
+
+  # Your main service image
+  service:
+    depends_on:
+      - postgres
+      - integresql
+    environment:
+      PGDATABASE: &PGDATABASE "development"
+      PGUSER: &PGUSER "dbuser"
+      PGPASSWORD: &PGPASSWORD "9bed16f749d74a3c8bfbced18a7647f5"
+      PGHOST: &PGHOST "postgres"
+      PGPORT: &PGPORT "5432"
+      PGSSLMODE: &PGSSLMODE "disable"
+
+      # optional: env for integresql client testing
+      # see https://github.com/allaboutapps/integresql-client-go
+      # INTEGRESQL_CLIENT_BASE_URL: "http://integresql:5000/api"
+
+    # [...] additional main service setup
+
+  integresql:
+    image: ghcr.io/allaboutapps/integresql:<TAG>
+    ports:
+      - "5000:5000"
+    depends_on:
+      - postgres
+    environment:
+      PGHOST: *PGHOST
+      PGUSER: *PGUSER
+      PGPASSWORD: *PGPASSWORD
+
+  postgres:
+    image: postgres:12.2-alpine # should be the same version as used live
+    # ATTENTION
+    # fsync=off, synchronous_commit=off and full_page_writes=off
+    # gives us a major speed up during local development and testing (~30%),
+    # however you should NEVER use these settings in PRODUCTION unless
+    # you want to have CORRUPTED data.
+    # DO NOT COPY/PASTE THIS BLINDLY.
+    # YOU HAVE BEEN WARNED.
+    # Apply some performance improvements to pg as these guarantees are not needed while running locally
+    command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'"
+    expose:
+      - "5432"
+    ports:
+      - "5432:5432"
+    environment:
+      POSTGRES_DB: *PGDATABASE
+      POSTGRES_USER: *PGUSER
+      POSTGRES_PASSWORD: *PGPASSWORD
+    volumes:
+      - pgvolume:/var/lib/postgresql/data
+
+volumes:
+  pgvolume: # declare a named volume to persist DB data
+```
+
+You may also refer to our [go-starter `docker-compose.yml`](https://github.com/allaboutapps/go-starter/blob/master/docker-compose.yml).
+
+### Run locally (not recommended)
+
+Installing IntegreSQL locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. Install the `integresql` executable to your Go bin folder:
+
+```bash
+# This installs the latest version of IntegreSQL into your $GOBIN
+go install github.com/allaboutapps/integresql/cmd/server@latest
+
+# you may want to rename the binary to integresql after installing:
+mv $GOBIN/server $GOBIN/integresql
+```
+
+Running the IntegreSQL server locally requires configuration via exported environment variables (see below).
+
+```bash
+export INTEGRESQL_PORT=5000
+export PGHOST=127.0.0.1
+export PGUSER=test
+export PGPASSWORD=testpass
+integresql
+```
+
+### Run within your CI/CD
+
+You'll also want to use IntegreSQL within your CI/CD pipeline. We recommend using the Docker image. Simply run it next to the postgres service.
+
+#### GitHub Actions
+
+For a working sample see [allaboutapps/go-starter](https://github.com/allaboutapps/go-starter/blob/master/.github/workflows/build-test.yml).
+
+```yaml
+jobs:
+  build-test:
+    runs-on: ubuntu-latest
+    services:
+      postgres:
+        image: postgres:<version>
+        env:
+          POSTGRES_DB: "development"
+          POSTGRES_USER: "dbuser"
+          POSTGRES_PASSWORD: "dbpass"
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+        ports:
+          - 5432:5432
+      integresql:
+        image: ghcr.io/allaboutapps/integresql:<TAG>
+        env:
+          PGHOST: "postgres"
+          PGUSER: "dbuser"
+          PGPASSWORD: "dbpass"
+```
+
+## Integrate
+
+You will typically want to integrate by a client lib (see below), but you can also integrate by RESTful JSON calls directly. The flow is illustrated in the follow-up sections.
+
+### Integrate by client lib
+
+It's simple to integrate, especially if there is already a client library available for your specific language. We currently have these:
+
+* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT)
+* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman)
+* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix)
+* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa)
+* ... *Add your link here and make a PR*
+
+### Integrate by RESTful JSON calls
+
+A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all.
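+
+To give you a feel for the raw HTTP flow, here is a minimal, illustrative Go sketch (this is *not* the official client: error handling is trimmed, the `hash` value is a stand-in, and the response payload is decoded generically; see the client libs above for the concrete types):
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+const baseURL = "http://localhost:5000/api/v1"
+
+func main() {
+	// hash over all files affecting your database (computed by you; placeholder value)
+	hash := "2db87c1e..."
+
+	// InitializeTemplate: POST /api/v1/templates
+	body, _ := json.Marshal(map[string]string{"hash": hash})
+	res, err := http.Post(baseURL+"/templates", "application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer res.Body.Close()
+
+	switch res.StatusCode {
+	case http.StatusOK:
+		// 200: we own the template: connect, apply migrations, seed fixtures, disconnect ...
+
+		// FinalizeTemplate: PUT /api/v1/templates/:hash
+		req, _ := http.NewRequest(http.MethodPut, baseURL+"/templates/"+hash, nil)
+		if _, err := http.DefaultClient.Do(req); err != nil {
+			panic(err)
+		}
+	case http.StatusLocked:
+		// 423: another test runner has already prepared (or is preparing) this template.
+	default:
+		// e.g. 503: IntegreSQL cannot communicate with PostgreSQL, fail the test runner.
+		panic("integresql: unexpected status " + res.Status)
+	}
+
+	// GetTestDatabase: GET /api/v1/templates/:hash/tests (repeat per test)
+	res2, err := http.Get(baseURL + "/templates/" + hash + "/tests")
+	if err != nil {
+		panic(err)
+	}
+	defer res2.Body.Close()
+
+	var testDB map[string]any // the test database connection payload
+	if err := json.NewDecoder(res2.Body).Decode(&testDB); err != nil {
+		panic(err)
+	}
+	fmt.Println(testDB)
+}
+```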
+
+First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes can start in parallel and get a ready and isolated test database from the pool (once the template database(s) have been initialized).
+
+#### Once per test runner/process
+
+Each test runner starts and needs to communicate with IntegreSQL to set up 1..n template database pools. The following sections describe the flows/scenarios you need to implement.
+
+##### Testrunner creates a new template database
+
+```mermaid
+sequenceDiagram
+    You->>Testrunner: make test
+
+    Note right of Testrunner: Compute a hash over all related
files that affect your database
(migrations, fixtures, imports, etc.)
+
+    Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified by the same unique hash
payload: {"hash": "string"}
+
+    Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates
+
+    IntegreSQL->>PostgreSQL: CREATE DATABASE
template_
+    PostgreSQL-->>IntegreSQL: 
+
+    IntegreSQL-->>Testrunner: StatusOK: 200
+
+    Note over Testrunner,PostgreSQL: Parse the received database connection payload and connect to the template database.
+
+    Testrunner->>PostgreSQL: Apply all migrations, seed all fixtures, ..., disconnect.
+    PostgreSQL-->>Testrunner: 
+
+    Note over Testrunner,IntegreSQL: Finalize the template so it can be used!
+
+    Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash
+    IntegreSQL-->>Testrunner: StatusOK: 200
+
+    Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool!
+
+    loop Each test
+        Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests
+        Testrunner->>PostgreSQL: 
+    end
+```
+
+##### Testrunner reuses an existing template database
+
+```mermaid
+sequenceDiagram
+
+    You->>Testrunner: make test
+
+    Note over Testrunner,IntegreSQL: Subsequent testrunners or multiple processes
simply call with the same template hash again.
+
+    Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates
+    IntegreSQL-->>Testrunner: StatusLocked: 423
+
+    Note over Testrunner,IntegreSQL: Some other testrunner / process has already recreated
this PostgreSQL template database identified by this hash
(or is currently doing it), you can just consider
the template ready at this point.
+
+    Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool!
+
+    loop Each test
+        Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests
+        Testrunner->>PostgreSQL: 
+    end
+
+```
+
+##### Failure modes during template database setup: 503
+
+```mermaid
+sequenceDiagram
+
+    You->>Testrunner: make test
+
+    Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates
+    IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503
+
+    Note over Testrunner,PostgreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process in this case (e.g. exit 1).
+
+```
+
+#### Per each test
+
+##### New test database per test
+
+Well, this is the normal flow to get a new isolated test database (prepopulated as it's created from the template) for your test.
+
+```mermaid
+sequenceDiagram
+
+    Note right of You: ...
+
+    loop Each test
+
+        Note right of Testrunner: Before each test, get a new isolated test database
from the pool for the template hash.
+
+        Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests
+
+        Note over Testrunner,IntegreSQL: Blocks until the template is finalized
+
+        Note right of IntegreSQL: The test databases for the template pool
were already created and are simply returned.
+
+        IntegreSQL-->>Testrunner: StatusOK: 200
+
+        Note over Testrunner,PostgreSQL: Your runner now has a fully isolated PostgreSQL database
from our already migrated/seeded template database to use within your test.
+
+        Testrunner->>PostgreSQL: Directly connect to the test database.
+
+        Note over Testrunner,PostgreSQL: Run your test code!
+
+        Testrunner-xPostgreSQL: Disconnect from the test database
+
+        Note over Testrunner,PostgreSQL: Your test is finished.
+
+    end
+```
+
+##### Optional: Manually unlocking a test database after a readonly test
+
+* Returns the given test DB directly to the pool, without cleaning (recreating) it.
+* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes.
+* This is useful if you are sure you did not make any changes to the database and thus want to skip the recreation process by returning it to the pool directly.
+
+```mermaid
+sequenceDiagram
+
+    Note right of You: ...
+
+    loop Each test
+
+        Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests
+        IntegreSQL-->>Testrunner: StatusOK: 200
+
+        Testrunner->>PostgreSQL: Directly connect to the test database.
+
+        Note over Testrunner,PostgreSQL: Run your **readonly** test code!
+
+        Testrunner-xPostgreSQL: Disconnect from the test database
+
+        Note over Testrunner,PostgreSQL: Your **readonly** test is finished.
As you did not modify the test database, you can unlock it again
(immediately available in the pool again).
+
+        Testrunner->>IntegreSQL: ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock
(previously DELETE /api/v1/templates/:hash/tests/:id, now soft-deprecated)
+        IntegreSQL-->>Testrunner: StatusOK: 200
+
+    end
+```
+
+##### Optional: Manually recreating a test database
+
+* Recreates the test DB according to the template and returns it to the pool.
+* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible.
+* This is useful if you run parallel tests with a mix of very long and very short tests, where our auto-FIFO recreation handling might otherwise block.
+
+```mermaid
+sequenceDiagram
+
+    Note right of You: ...
+
+    loop Each test
+
+        Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests
+        IntegreSQL-->>Testrunner: StatusOK: 200
+
+        Testrunner->>PostgreSQL: Directly connect to the test database.
+
+        Note over Testrunner,PostgreSQL: Run your test code!
+
+        Testrunner-xPostgreSQL: Disconnect from the test database
+
+        Note over Testrunner,PostgreSQL: Your test is finished.
As you don't want to wait for FIFO autocleaning,
you can manually recreate the test database.
+
+        Testrunner->>IntegreSQL: RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate
+        IntegreSQL-->>Testrunner: StatusOK: 200
+
+    end
+```
+
+
+##### Failure modes while getting a new test database
+
+Some typical status codes you might encounter while getting a new test database:
+
+###### StatusNotFound 404
+
+Well, seems like someone forgot to call InitializeTemplate or it errored out.
+
+###### StatusGone 410
+
+There was an error during test setup with your fixtures, someone called `DiscardTemplate`, thus this template cannot be used.
+
+###### StatusServiceUnavailable 503
+
+Well, typically a PostgreSQL connectivity problem.
+
+#### Demo
+
+If you want to take a look at how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go).
+
+## Configuration
+
+IntegreSQL requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available:
+
+| Description | Environment variable | Required | Default |
+| ----------- | -------------------- | -------- | ------- |
+| Server listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | | `""` |
+| Server port | `INTEGRESQL_PORT` | | `5000` |
+| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | Yes | `"127.0.0.1"` |
+| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | | `5432` |
+| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | Yes | `"postgres"` |
+| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | Yes | `""` |
+| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | | `"postgres"` |
+| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | | `"template0"` |
+| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | | `"integresql"` |
+| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | | `"template"` |
+| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | | `"test"` |
+| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | | PostgreSQL: username |
+| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | | PostgreSQL: password |
+| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | | [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) |
+| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | | [`runtime.NumCPU()*4`](https://pkg.go.dev/runtime#NumCPU) |
+| Maximal number of pool tasks running in parallel | `INTEGRESQL_POOL_MAX_PARALLEL_TASKS` | | [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) |
+| Minimal time to wait after a test db recreate has failed | `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS` | | `250`ms |
+| The maximum possible sleep time between recreation retries | `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS` | | `3000`ms |
+| Get test-database blocks auto-recreation (FIFO) for this duration | `INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS` | | `250`ms |
+| Internal time to wait for a template-database to transition into the 'finalized' state | `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS` | | `60000`ms |
+| Internal time to wait for a ready database | `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS` | | `60000`ms |
+| Enables [pprof debug endpoints](https://golang.org/pkg/net/http/pprof/) under `/debug/*` | `INTEGRESQL_DEBUG_ENDPOINTS` | | `false` |
+| Enables [echo framework debug mode](https://echo.labstack.com/docs/customization) | `INTEGRESQL_ECHO_DEBUG` | | `false` |
+| [Enables CORS](https://echo.labstack.com/docs/middleware/cors) | `INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE` | | `true` |
+| [Enables logger](https://echo.labstack.com/docs/middleware/logger) | `INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE` | | `true` |
+| [Enables recover](https://echo.labstack.com/docs/middleware/recover) | `INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE` | | `true` |
+| [Sets request_id to context](https://echo.labstack.com/docs/middleware/request-id) | `INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE` | | `true` |
+| [Auto-adds trailing slash](https://echo.labstack.com/docs/middleware/trailing-slash) | `INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE` | | `true` |
+| [Enables timeout middleware](https://echo.labstack.com/docs/middleware/timeout) | `INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE` | | `true` |
+| Generic timeout handling for most endpoints | `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS` | | `60000`ms |
+| Show logs of [severity](https://github.com/rs/zerolog?tab=readme-ov-file#leveled-logging) | `INTEGRESQL_LOGGER_LEVEL` | | `"info"` |
+| Request log [severity](https://github.com/rs/zerolog?tab=readme-ov-file#leveled-logging) | `INTEGRESQL_LOGGER_REQUEST_LEVEL` | | `"info"` |
+| Should the request-log include the body? | `INTEGRESQL_LOGGER_LOG_REQUEST_BODY` | | `false` |
+| Should the request-log include headers? | `INTEGRESQL_LOGGER_LOG_REQUEST_HEADER` | | `false` |
+| Should the request-log include the query? | `INTEGRESQL_LOGGER_LOG_REQUEST_QUERY` | | `false` |
+| Should the request-log include the response body? | `INTEGRESQL_LOGGER_LOG_RESPONSE_BODY` | | `false` |
+| Should the request-log include the response header? | `INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER` | | `false` |
+| Should the console logger pretty-print the log (instead of json)? | `INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE` | | `false` |
+
+
+## Architecture
+
+### TestDatabase states
+
+The following describes the states and transitions of a TestDatabase.
+
+```mermaid
+stateDiagram-v2
+
+    HashPool --> TestDatabase: Task EXTEND
+
+    state TestDatabase {
+        [*] --> ready: init
+        ready --> dirty: GetTestDatabase()
+        dirty --> ready: ReturnTestDatabase()
+        dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY
+        recreating --> ready: generation++
+        recreating --> recreating: retry (still in use)
+    }
+```
+
+### Pool structure
+
+The following describes the relationship between the components of IntegreSQL.
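+
+As a rough, hypothetical orientation only (type names assumed for illustration; the real implementation lives in the `pkg/manager` and `pkg/pool` packages), the ownership hierarchy shown in the ER diagram below could be sketched as Go structs:
+
+```go
+package docs // hypothetical package, illustration only
+
+// DatabaseConfig holds the connection settings handed out to test runners.
+type DatabaseConfig struct {
+	Host     string
+	Port     int
+	Username string
+	Password string
+	Database string
+}
+
+// Database is the part shared by template and test databases.
+type Database struct {
+	TemplateHash string
+	Config       DatabaseConfig
+}
+
+// TemplateDatabase "is" a Database.
+type TemplateDatabase struct {
+	Database Database
+}
+
+// TestDatabase "is" a Database with a pool-local ID.
+type TestDatabase struct {
+	ID       int
+	Database Database
+}
+
+// Template wraps the template database for one migrations/fixtures hash.
+type Template struct {
+	Database TemplateDatabase
+}
+
+// HashPool manages the warm pool of test databases for one hash.
+type HashPool struct {
+	Databases []TestDatabase
+}
+
+// Manager owns the template collection and one HashPool per hash.
+type Manager struct {
+	TemplateCollection []Template
+	PoolCollection     []HashPool
+}
+
+// Server owns the Manager.
+type Server struct {
+	Manager *Manager
+}
+```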
+ +```mermaid +erDiagram + Server ||--o| Manager : owns + Manager { + Template[] templateCollection + HashPool[] poolCollection + } + Manager ||--o{ HashPool : has + Manager ||--o{ Template : has + Template { + TemplateDatabase database + } + HashPool { + TestDatabase database + } + HashPool ||--o{ TestDatabase : "manages" + Template ||--|| TemplateDatabase : "sets" + TestDatabase { + int ID + Database database + } + TemplateDatabase { + Database database + } + Database { + string TemplateHash + Config DatabaseConfig + } + TestDatabase o|--|| Database : "is" + TemplateDatabase o|--|| Database : "is" +``` + + + ## Background We came a long way to realize that something just did not feel right with our PostgreSQL integration testing strategies. @@ -160,7 +675,7 @@ This is actually the (simplified) strategy, that we have used in [allaboutapps-b Here's a quick benchmark of how this strategy typically performed back then: -``` +```bash --- -------------------------------- --- replicas switched: 50 avg=11ms min=1ms max=445ms replicas awaited: 1 prebuffer=8 avg=436ms max=436ms @@ -192,7 +707,7 @@ The cool thing about having a warm pool of replicas setup in the background, is Let's look at a sightly bigger testsuite and see how this approach may possibly scale: -``` +```bash --- ----------------------------------- --- replicas switched: 280 avg=26ms min=11ms max=447ms replicas awaited: 1 prebuffer=8 avg=417ms max=417ms @@ -224,188 +739,87 @@ We realized that having the above pool logic directly within the test runner is As we switched to Go as our primary backend engineering language, we needed to rewrite the above logic anyways and decided to provide a safe and language agnostic way to utilize this testing strategy with PostgreSQL. -IntegreSQL is a RESTful JSON API distributed as Docker image or go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. - -Our flow now finally changed to this: - -* **Start IntegreSQL** and leave it running **in the background** (your PostgreSQL template and test database pool will always be warm) -* ... -* 1..n test runners start in parallel -* Once per test runner process - * Get migrations/fixtures files `hash` over all related database files - * `InitializeTemplate: POST /templates`: attempt to create a new PostgreSQL template database identifying though the above hash `payload: {"hash": "string"}` - * `StatusOK: 200` - * Truncate - * Apply all migrations - * Seed all fixtures - * `FinalizeTemplate: PUT /templates/{hash}` - * If you encountered any template setup errors call `DiscardTemplate: DELETE /templates/{hash}` - * `StatusLocked: 423` - * Some other process has already recreated a PostgreSQL template database for this `hash` (or is currently doing it), you can just consider the template ready at this point. 
- * `StatusServiceUnavailable: 503` - * Typically happens if IntegreSQL cannot communicate with PostgreSQL, fail the test runner process -* **Before each** test `GetTestDatabase: GET /templates/{hash}/tests` - * Blocks until the template database is finalized (via `FinalizeTemplate`) - * `StatusOK: 200` - * You get a fully isolated PostgreSQL database from our already migrated/seeded template database to use within your test - * `StatusNotFound: 404` - * Well, seems like someone forgot to call `InitializeTemplate` or it errored out. - * `StatusGone: 410` - * There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. - * `StatusServiceUnavailable: 503` - * Well, typically a PostgreSQL connectivity problem -* Utilizing the isolated PostgreSQL test database received from IntegreSQL for each (parallel) test: - * **Run your test code** -* **After each** test optional: `ReturnTestDatabase: DELETE /templates/{hash}/tests/{test-database-id}` - * Marks the test database that it can be wiped early on pool limit overflow (or reused if `true` is submitted) -* 1..n test runners end -* ... -* Subsequent 1..n test runners start/end in parallel and reuse the above logic - -#### Integrate by client lib - -The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. We currently have those: - -* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) -* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) -* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) -* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) -* ... *Add your link here and make a PR* - -#### Integrate by RESTful JSON calls - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. - -#### Demo - -If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [testing setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/testing.go). - -## Install - -### Install using Docker (preferred) - -A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Docker Hub](https://hub.docker.com/r/allaboutapps/integresql). - -```bash -docker pull allaboutapps/integresql -``` - -### Install locally - -Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. Install the `IntegreSQL` executable to your Go bin folder: +This is how IntegreSQL was born. -```bash -go get github.com/allaboutapps/integresql/cmd/server -``` +## Benchmarks -## Configuration +### Benchmark v1.1.0 vs v1.0.0 -`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). 
The following settings are available:
-
-| Description                                            | Environment variable                  | Default              | Required |
-| ------------------------------------------------------ | ------------------------------------- | -------------------- | -------- |
-| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | |
-| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | |
-| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes |
-| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | |
-| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes |
-| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes |
-| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | |
-| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | |
-| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | |
-| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | `"template"` | |
-| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | |
-| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | |
-| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | |
-| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | |
-| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | |
+We focused on improving the pool manager performance in v1.1.0, especially when it comes to locking and thus request latency.
+![benchmark comparison v1.1.0](docs/benchmark_v1_1_0.png)
-## Usage
-
-### Run using Docker (preferred)
-
-Simply start the `IntegreSQL` [Docker](https://docs.docker.com/install/) (19.03 or above) container, provide the required environment variables and expose the server port:
+The main goal was to bring IntegreSQL's performance on par with our previous native Node.js implementation, which we also benchmarked:
```bash
-docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 allaboutapps/integresql
-```
-
-`IntegreSQL` can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above):
-```yaml
-version: "3.4"
-services:
-
-  # Your main service image
-  service:
-    depends_on:
-      - postgres
-      - integresql
-    environment:
-      PGDATABASE: &PGDATABASE "development"
-      PGUSER: &PGUSER "dbuser"
-      PGPASSWORD: &PGPASSWORD "9bed16f749d74a3c8bfbced18a7647f5"
-      PGHOST: &PGHOST "postgres"
-      PGPORT: &PGPORT "5432"
-      PGSSLMODE: &PGSSLMODE "disable"
-
-      # optional: env for integresql client testing
-      # see https://github.com/allaboutapps/integresql-client-go
-      # INTEGRESQL_CLIENT_BASE_URL: "http://integresql:5000/api"
-
-      # [...] additional main service setup
-
-  integresql:
-    image: allaboutapps/integresql:1.0.0
-    ports:
-      - "5000:5000"
-    depends_on:
-      - postgres
-    environment:
-      PGHOST: *PGHOST
-      PGUSER: *PGUSER
-      PGPASSWORD: *PGPASSWORD
-
-  postgres:
-    image: postgres:12.2-alpine # should be the same version as used live
-    # ATTENTION
-    # fsync=off, synchronous_commit=off and full_page_writes=off
-    # gives us a major speed up during local development and testing (~30%),
-    # however you should NEVER use these settings in PRODUCTION unless
-    # you want to have CORRUPTED data.
-    # DO NOT COPY/PASTE THIS BLINDLY.
-    # YOU HAVE BEEN WARNED.
- # Apply some performance improvements to pg as these guarantees are not needed while running locally - command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" - expose: - - "5432" - ports: - - "5432:5432" - environment: - POSTGRES_DB: *PGDATABASE - POSTGRES_USER: *PGUSER - POSTGRES_PASSWORD: *PGPASSWORD - volumes: - - pgvolume:/var/lib/postgresql/data - -volumes: - pgvolume: # declare a named volume to persist DB data +# Previous Node.js implementation +--- ----------------------------------- --- + replicas switched: 563 avg=14ms min=6ms max=316ms + replicas awaited: 1 prebuffer=8 avg=301ms max=301ms + background replicas: 571 avg=-ms min=-ms max=1180ms + - warm up: 32% 4041ms + * drop/cache check: 4% 561ms + * migrate/cache reuse: 25% 3177ms + * fixtures: 2% 302ms + * special: 0% 0ms + * create pool: 0% 1ms + - switching: 67% 8294ms + * disconnect: 1% 139ms + * switch slave: 4% 591ms + - resolve next: 2% 290ms + - await next: 2% 301ms + * reinitialize: 61% 7563ms + strategy related time: 12335ms + vs total executed time: 11% 106184ms +--- --------------------------------- --- +Done in 106.60s. + +# IntegreSQL v1.1.0 (next version) +--- ----------------------------------- --- + replicas switched: 563 avg=70ms min=58ms max=603ms + replicas awaited: 1 prebuffer=8 avg=72ms max=72ms + background replicas: 571 avg=58ms min=49ms max=520ms + - warm up: 9% 4101ms + * drop/cache check: 0% 1ms + * migrate/cache reuse: 8% 3520ms + * fixtures: 0% 296ms + * special: 0% 0ms + * create pool: 0% 284ms + - switching: 90% 39865ms + * disconnect: 0% 120ms + * switch replica: 0% 261ms (563x min=0ms q25=0ms q50=0ms q75=1ms q95=1ms max=72ms) + - resolve next: 0% 189ms + - await next: 0% 72ms + * reinitialize: 89% 39478ms (563x min=58ms q25=66ms q50=68ms q75=71ms q95=80ms max=531ms) + strategy related time: 43966ms + vs total executed time: 40% 109052ms +--- --------------------------------- --- +Done in 109.45s. + +# IntegreSQL v1.0.0 (previous version) +--- ----------------------------------- --- + replicas switched: 563 avg=131ms min=9ms max=2019ms + replicas awaited: 94 prebuffer=8 avg=590ms max=1997ms + background replicas: 571 avg=1292ms min=52ms max=3817ms + - warm up: 7% 6144ms + * drop/cache check: 0% 0ms + * migrate/cache reuse: 4% 3587ms + * fixtures: 0% 298ms + * special: 0% 0ms + * create pool: 2% 2259ms + - switching: 92% 73837ms + * disconnect: 0% 112ms + * switch replica: 64% 51552ms (563x min=0ms q25=0ms q50=0ms q75=1ms q95=773ms max=1997ms) + - resolve next: 5% 3922ms + - await next: 69% 55474ms + * reinitialize: 27% 22169ms (563x min=9ms q25=12ms q50=15ms q75=19ms q95=187ms max=1201ms) + strategy related time: 79981ms + vs total executed time: 51% 153889ms +--- --------------------------------- --- +Done in 154.29s. ``` -You may also refer to our [go-starter `docker-compose.yml`](https://github.com/allaboutapps/go-starter/blob/master/docker-compose.yml). - -### Run locally - -Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below): - -```bash -export INTEGRESQL_PORT=5000 -export PGHOST=127.0.0.1 -export PGUSER=test -export PGPASSWORD=testpass -integresql -``` ## Contributing @@ -415,14 +829,14 @@ Please make sure to update tests as appropriate. 
### Development setup
 
-`IntegreSQL` requires the following local setup for development:
+IntegreSQL requires the following local setup for development:
 
 - [Docker CE](https://docs.docker.com/install/) (19.03 or above)
 - [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above)
 
 The project makes use of the [devcontainer functionality](https://code.visualstudio.com/docs/remote/containers) provided by [Visual Studio Code](https://code.visualstudio.com/) so no local installation of a Go compiler is required when using VSCode as an IDE.
 
-Should you prefer to develop `IntegreSQL` without the Docker setup, please ensure a working [Go](https://golang.org/dl/) (1.14 or above) environment has been configured as well as a PostgreSQL instance is available (tested against version 12 or above, but *should* be compatible to lower versions) and the appropriate environment variables have been configured as described in the [Install](#install) section.
+Should you prefer to develop IntegreSQL without the Docker setup, please ensure that a working [Go](https://golang.org/dl/) (1.14 or above) environment has been configured, that a PostgreSQL instance is available (tested against version 12 or above, but it *should* be compatible with lower versions) and that the appropriate environment variables have been configured as described in the [Install](#install) section.
 
 ### Development quickstart
 
@@ -455,9 +869,13 @@ integresql
 
 ## Maintainers
 
-- [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT)
 - [Mario Ranftl - @majodev](https://github.com/majodev)
 
+### Previous maintainers
+
+- [Anna - @anjankow](https://github.com/anjankow)
+- [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT)
+
 ## License
 
-[MIT](LICENSE) © 2020 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the `IntegreSQL` project contributors
+[MIT](LICENSE) © 2020-2024 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the IntegreSQL project contributors
diff --git a/cmd/server/main.go b/cmd/server/main.go
index f23b3aa..9ab58f2 100644
--- a/cmd/server/main.go
+++ b/cmd/server/main.go
@@ -2,7 +2,7 @@ package main
 
 import (
 	"context"
-	"log"
+	"errors"
 	"net/http"
 	"os"
 	"os/signal"
@@ -10,21 +10,41 @@ import (
 	"time"
 
 	"github.com/allaboutapps/integresql/internal/api"
+	"github.com/allaboutapps/integresql/internal/config"
 	"github.com/allaboutapps/integresql/internal/router"
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
 )
 
 func main() {
-	s := api.DefaultServerFromEnv()
+
+	cfg := api.DefaultServerConfigFromEnv()
+
+	zerolog.TimeFieldFormat = time.RFC3339Nano
+	zerolog.SetGlobalLevel(cfg.Logger.Level)
+	if cfg.Logger.PrettyPrintConsole {
+		log.Logger = log.Output(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
+			w.TimeFormat = "15:04:05"
+		}))
+	}
+
+	log.Info().Str("version", config.GetFormattedBuildArgs()).Msg("starting...")
+
+	s := api.NewServer(cfg)
 
 	if err := s.InitManager(context.Background()); err != nil {
-		log.Fatalf("Failed to initialize manager: %v", err)
+		log.Fatal().Err(err).Msg("Failed to initialize manager")
 	}
 
 	router.Init(s)
 
 	go func() {
 		if err := s.Start(); err != nil {
-			log.Fatalf("Failed to start server: %v", err)
+			if errors.Is(err, http.ErrServerClosed) {
+				log.Info().Msg("Server closed")
+			} else {
+				log.Fatal().Err(err).Msg("Failed to start server")
+			}
 		}
 	}()
 
@@ -35,7 +55,7 @@ func main() {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
-	if err := s.Shutdown(ctx); err != nil && err != http.ErrServerClosed {
-		log.Fatalf("Failed to
gracefully shut down server: %v", err) + if err := s.Shutdown(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Fatal().Err(err).Msg("Failed to gracefully shut down server") } } diff --git a/docker-compose.yml b/docker-compose.yml index 91b5922..70acc3e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,12 +5,26 @@ services: build: context: . target: development - ports: - - "5000:5000" + # ports: + # - "5000:5000" working_dir: /app + # linux permissions / vscode support: we must explicitly run as the development user + user: development volumes: - - .:/app #:delegated - # - ./.pkg:/go/pkg # enable this to reuse the pkg cache + # mount working directory + # https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos + # https://docs.docker.com/docker-for-mac/osxfs-caching/#delegated + # the container’s view is authoritative (permit delays before updates on the container appear in the host) + - .:/app:delegated + + # mount cached go pkg downloads + - go-pkg:/go/pkg + + # mount cached vscode container extensions + # https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild + - vscode-extensions:/home/development/.vscode-server/extensions + - vscode-extensions-insiders:/home/development/.vscode-server-insiders/extensions + depends_on: - postgres environment: &SERVICE_ENV @@ -28,15 +42,21 @@ services: - seccomp:unconfined # Overrides default command so things don't shut down after the process ends. - command: /bin/sh -c "while sleep 1000; do :; done" + # Overrides default command so things don't shut down after the process ends. + command: + - /bin/sh + - -c + - | + git config --global --add safe.directory /app + while sleep 1000; do :; done postgres: - image: postgres:12.2-alpine # should be the same version as used in .drone.yml, Dockerfile and live + image: postgres:12.4-alpine # should be the same version as used in .drone.yml, Dockerfile and live command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" expose: - "5432" ports: - - "5432:5432" + - "5434:5432" environment: POSTGRES_DB: *PSQL_DBNAME POSTGRES_USER: *PSQL_USER @@ -46,3 +66,11 @@ services: volumes: pgvolume: # declare a named volume to persist DB data + + # go: go mod cached downloads + go-pkg: + + # vscode: Avoiding extension reinstalls on container rebuild + # https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild + vscode-extensions: + vscode-extensions-insiders: diff --git a/docs/benchmark_v1_1_0.png b/docs/benchmark_v1_1_0.png new file mode 100644 index 0000000..ab4cb06 Binary files /dev/null and b/docs/benchmark_v1_1_0.png differ diff --git a/go.mod b/go.mod index 6d76eaa..85a522b 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,31 @@ module github.com/allaboutapps/integresql -go 1.14 +go 1.20 require ( - github.com/labstack/echo/v4 v4.1.16 - github.com/lib/pq v1.3.0 - golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect - golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect + github.com/google/uuid v1.6.0 + github.com/labstack/echo/v4 v4.11.4 + github.com/lib/pq v1.10.9 + github.com/rs/zerolog v1.31.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/sync v0.6.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + 
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 51a1bdc..2c1be9a 100644 --- a/go.sum +++ b/go.sum @@ -1,55 +1,63 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o= -github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI= -github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zGq8= +github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4= -github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 h1:IhZPbxNd1UjBCaD5AfpSSbJTRlp+ZSuyuH5uoksNS04= -golang.org/x/crypto v0.0.0-20200420104511-884d27f42877/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/api/admin/admin.go b/internal/api/admin/admin.go index bb6a68b..3bf1662 100644 --- a/internal/api/admin/admin.go +++ b/internal/api/admin/admin.go @@ -9,7 +9,8 @@ import ( func deleteResetAllTemplates(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { - if err := s.Manager.ResetAllTracking(); err != nil { + ctx := c.Request().Context() + if err := s.Manager.ResetAllTracking(ctx); err != nil { return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } diff --git a/internal/api/middleware/logger.go b/internal/api/middleware/logger.go new file mode 100644 index 0000000..7135b5b --- /dev/null +++ b/internal/api/middleware/logger.go @@ -0,0 +1,307 @@ +package middleware + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/allaboutapps/integresql/pkg/util" + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +// RequestBodyLogSkipper defines a function to skip logging certain request bodies. +// Returning true skips logging the payload of the request. +type RequestBodyLogSkipper func(req *http.Request) bool + +// DefaultRequestBodyLogSkipper returns true for all requests with Content-Type +// application/x-www-form-urlencoded or multipart/form-data as those might contain +// binary or URL-encoded file uploads unfit for logging purposes. +func DefaultRequestBodyLogSkipper(req *http.Request) bool { + contentType := req.Header.Get(echo.HeaderContentType) + switch { + case strings.HasPrefix(contentType, echo.MIMEApplicationForm), + strings.HasPrefix(contentType, echo.MIMEMultipartForm): + return true + default: + return false + } +} + +// ResponseBodyLogSkipper defines a function to skip logging certain response bodies. +// Returning true skips logging the payload of the response. +type ResponseBodyLogSkipper func(req *http.Request, res *echo.Response) bool + +// DefaultResponseBodyLogSkipper returns false for all responses with Content-Type +// application/json, preventing logging for all other types of payloads as those +// might contain binary or URL-encoded data unfit for logging purposes. +func DefaultResponseBodyLogSkipper(_ *http.Request, res *echo.Response) bool { + contentType := res.Header().Get(echo.HeaderContentType) + switch { + case strings.HasPrefix(contentType, echo.MIMEApplicationJSON): + return false + default: + return true + } +} + +// BodyLogReplacer defines a function to replace certain parts of a body before logging it, +// mainly used to strip sensitive information from a request or response payload. +// The []byte returned should contain a sanitized payload ready for logging. +type BodyLogReplacer func(body []byte) []byte + +// DefaultBodyLogReplacer returns the body received without any modifications. +func DefaultBodyLogReplacer(body []byte) []byte { + return body +} + +// HeaderLogReplacer defines a function to replace certain parts of a header before logging it, +// mainly used to strip sensitive information from a request or response header. 
+// The http.Header returned should be a sanitized copy of the original header as not to modify +// the request or response while logging. +type HeaderLogReplacer func(header http.Header) http.Header + +// DefaultHeaderLogReplacer replaces all Authorization, X-CSRF-Token and Proxy-Authorization +// header entries with a redacted string, indicating their presence without revealing actual, +// potentially sensitive values in the logs. +func DefaultHeaderLogReplacer(header http.Header) http.Header { + sanitizedHeader := http.Header{} + + for k, vv := range header { + shouldRedact := strings.EqualFold(k, echo.HeaderAuthorization) || + strings.EqualFold(k, echo.HeaderXCSRFToken) || + strings.EqualFold(k, "Proxy-Authorization") + + for _, v := range vv { + if shouldRedact { + sanitizedHeader.Add(k, "*****REDACTED*****") + } else { + sanitizedHeader.Add(k, v) + } + } + } + + return sanitizedHeader +} + +// QueryLogReplacer defines a function to replace certain parts of a URL query before logging it, +// mainly used to strip sensitive information from a request query. +// The url.Values returned should be a sanitized copy of the original query as not to modify the +// request while logging. +type QueryLogReplacer func(query url.Values) url.Values + +// DefaultQueryLogReplacer returns the query received without any modifications. +func DefaultQueryLogReplacer(query url.Values) url.Values { + return query +} + +var ( + DefaultLoggerConfig = LoggerConfig{ + Skipper: middleware.DefaultSkipper, + Level: zerolog.DebugLevel, + LogRequestBody: false, + LogRequestHeader: false, + LogRequestQuery: false, + RequestBodyLogSkipper: DefaultRequestBodyLogSkipper, + RequestBodyLogReplacer: DefaultBodyLogReplacer, + RequestHeaderLogReplacer: DefaultHeaderLogReplacer, + RequestQueryLogReplacer: DefaultQueryLogReplacer, + LogResponseBody: false, + LogResponseHeader: false, + ResponseBodyLogSkipper: DefaultResponseBodyLogSkipper, + ResponseBodyLogReplacer: DefaultBodyLogReplacer, + } +) + +type LoggerConfig struct { + Skipper middleware.Skipper + Level zerolog.Level + LogRequestBody bool + LogRequestHeader bool + LogRequestQuery bool + RequestBodyLogSkipper RequestBodyLogSkipper + RequestBodyLogReplacer BodyLogReplacer + RequestHeaderLogReplacer HeaderLogReplacer + RequestQueryLogReplacer QueryLogReplacer + LogResponseBody bool + LogResponseHeader bool + ResponseBodyLogSkipper ResponseBodyLogSkipper + ResponseBodyLogReplacer BodyLogReplacer + ResponseHeaderLogReplacer HeaderLogReplacer +} + +// Logger with default logger output and configuration +func Logger() echo.MiddlewareFunc { + return LoggerWithConfig(DefaultLoggerConfig, nil) +} + +// LoggerWithConfig returns a new MiddlewareFunc which creates a logger with the desired configuration. +// If output is set to nil, the default output is used. If more output params are provided, the first is being used. 
+func LoggerWithConfig(config LoggerConfig, output ...io.Writer) echo.MiddlewareFunc { + if config.Skipper == nil { + config.Skipper = DefaultLoggerConfig.Skipper + } + if config.RequestBodyLogSkipper == nil { + config.RequestBodyLogSkipper = DefaultRequestBodyLogSkipper + } + if config.RequestBodyLogReplacer == nil { + config.RequestBodyLogReplacer = DefaultBodyLogReplacer + } + if config.RequestHeaderLogReplacer == nil { + config.RequestHeaderLogReplacer = DefaultHeaderLogReplacer + } + if config.RequestQueryLogReplacer == nil { + config.RequestQueryLogReplacer = DefaultQueryLogReplacer + } + if config.ResponseBodyLogSkipper == nil { + config.ResponseBodyLogSkipper = DefaultResponseBodyLogSkipper + } + if config.ResponseBodyLogReplacer == nil { + config.ResponseBodyLogReplacer = DefaultBodyLogReplacer + } + if config.ResponseHeaderLogReplacer == nil { + config.ResponseHeaderLogReplacer = DefaultHeaderLogReplacer + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + if config.Skipper(c) { + return next(c) + } + + req := c.Request() + res := c.Response() + + id := req.Header.Get(echo.HeaderXRequestID) + if len(id) == 0 { + id = res.Header().Get(echo.HeaderXRequestID) + } + + in := req.Header.Get(echo.HeaderContentLength) + if len(in) == 0 { + in = "0" + } + + l := log.With(). + Dict("req", zerolog.Dict(). + Str("id", id). + Str("host", req.Host). + Str("method", req.Method). + Str("url", req.URL.String()). + Str("bytes_in", in), + ).Logger() + + if len(output) > 0 { + l = l.Output(output[0]) + } + + le := l.WithLevel(config.Level) + req = req.WithContext(l.WithContext(context.WithValue(req.Context(), util.CTXKeyRequestID, id))) + + if config.LogRequestBody && !config.RequestBodyLogSkipper(req) { + var reqBody []byte + var err error + if req.Body != nil { + reqBody, err = io.ReadAll(req.Body) + if err != nil { + l.Error().Err(err).Msg("Failed to read body while logging request") + return err + } + + req.Body = io.NopCloser(bytes.NewBuffer(reqBody)) + } + + le = le.Bytes("req_body", config.RequestBodyLogReplacer(reqBody)) + } + if config.LogRequestHeader { + header := zerolog.Dict() + for k, v := range config.RequestHeaderLogReplacer(req.Header) { + header.Strs(k, v) + } + + le = le.Dict("req_header", header) + } + if config.LogRequestQuery { + query := zerolog.Dict() + for k, v := range req.URL.Query() { + query.Strs(k, v) + } + + le = le.Dict("req_query", query) + } + + le.Msg("Request received") + + c.SetRequest(req) + + var resBody bytes.Buffer + if config.LogResponseBody { + mw := io.MultiWriter(res.Writer, &resBody) + writer := &bodyDumpResponseWriter{Writer: mw, ResponseWriter: res.Writer} + res.Writer = writer + } + + start := time.Now() + err := next(c) + if err != nil { + c.Error(err) + } + stop := time.Now() + + // Retrieve logger from context again since other middlewares might have enhanced it + ll := util.LogFromEchoContext(c) + lle := ll.WithLevel(config.Level). + Dict("res", zerolog.Dict(). + Int("status", res.Status). + Int64("bytes_out", res.Size). + TimeDiff("duration_ms", stop, start). 
+					Err(err),
+				)
+
+			if config.LogResponseBody && !config.ResponseBodyLogSkipper(req, res) {
+				lle = lle.Bytes("res_body", config.ResponseBodyLogReplacer(resBody.Bytes()))
+			}
+			if config.LogResponseHeader {
+				header := zerolog.Dict()
+				for k, v := range config.ResponseHeaderLogReplacer(res.Header()) {
+					header.Strs(k, v)
+				}
+
+				lle = lle.Dict("res_header", header)
+			}
+
+			lle.Msg("Response sent")
+
+			return nil
+		}
+	}
+}
+
+type bodyDumpResponseWriter struct {
+	io.Writer
+	http.ResponseWriter
+}
+
+func (w *bodyDumpResponseWriter) WriteHeader(code int) {
+	w.ResponseWriter.WriteHeader(code)
+}
+
+func (w *bodyDumpResponseWriter) Write(b []byte) (int, error) {
+	return w.Writer.Write(b)
+}
+
+func (w *bodyDumpResponseWriter) Flush() {
+	w.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (w *bodyDumpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return w.ResponseWriter.(http.Hijacker).Hijack()
+}
diff --git a/internal/api/server.go b/internal/api/server.go
index af9b540..e4a4689 100644
--- a/internal/api/server.go
+++ b/internal/api/server.go
@@ -8,6 +8,9 @@ import (
 	"net"
 	"time"
 
+	// #nosec G108 - pprof handlers (conditionally made available via http.DefaultServeMux within router)
+	_ "net/http/pprof"
+
 	"github.com/allaboutapps/integresql/pkg/manager"
 	"github.com/allaboutapps/integresql/pkg/util"
 	"github.com/labstack/echo/v4"
diff --git a/internal/api/server_config.go b/internal/api/server_config.go
index 8294238..8840e63 100644
--- a/internal/api/server_config.go
+++ b/internal/api/server_config.go
@@ -1,15 +1,70 @@
 package api
 
-import "github.com/allaboutapps/integresql/pkg/util"
+import (
+	"time"
+
+	"github.com/allaboutapps/integresql/pkg/util"
+	"github.com/rs/zerolog"
+)
 
 type ServerConfig struct {
-	Address string
-	Port    int
+	Address        string
+	Port           int
+	DebugEndpoints bool
+	Logger         LoggerConfig
+	Echo           EchoConfig
+}
+
+type EchoConfig struct {
+	Debug                         bool
+	ListenAddress                 string
+	EnableCORSMiddleware          bool
+	EnableLoggerMiddleware        bool
+	EnableRecoverMiddleware       bool
+	EnableRequestIDMiddleware     bool
+	EnableTrailingSlashMiddleware bool
+	EnableTimeoutMiddleware       bool
+	RequestTimeout                time.Duration
+}
+
+type LoggerConfig struct {
+	Level              zerolog.Level
+	RequestLevel       zerolog.Level
+	LogRequestBody     bool
+	LogRequestHeader   bool
+	LogRequestQuery    bool
+	LogResponseBody    bool
+	LogResponseHeader  bool
+	PrettyPrintConsole bool
 }
 
 func DefaultServerConfigFromEnv() ServerConfig {
 	return ServerConfig{
-		Address: util.GetEnv("INTEGRESQL_ADDRESS", ""),
-		Port:    util.GetEnvAsInt("INTEGRESQL_PORT", 5000),
+		Address:        util.GetEnv("INTEGRESQL_ADDRESS", ""),
+		Port:           util.GetEnvAsInt("INTEGRESQL_PORT", 5000),
+		DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", false), // https://golang.org/pkg/net/http/pprof/
+		Echo: EchoConfig{
+			Debug:                         util.GetEnvAsBool("INTEGRESQL_ECHO_DEBUG", false),
+			EnableCORSMiddleware:          util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE", true),
+			EnableLoggerMiddleware:        util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE", true),
+			EnableRecoverMiddleware:       util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE", true),
+			EnableRequestIDMiddleware:     util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE", true),
+			EnableTrailingSlashMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE", true),
+			EnableTimeoutMiddleware:       util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE", true),
+
+			// typically these timeouts should be the same as INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS and INTEGRESQL_TEST_DB_GET_TIMEOUT_MS
+			// pkg/manager/manager_config.go
+			RequestTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/)), // affects INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS and INTEGRESQL_TEST_DB_GET_TIMEOUT_MS
+		},
+		Logger: LoggerConfig{
+			Level:              util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_LEVEL", zerolog.InfoLevel.String())),
+			RequestLevel:       util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_REQUEST_LEVEL", zerolog.InfoLevel.String())),
+			LogRequestBody:     util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_BODY", false),
+			LogRequestHeader:   util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_HEADER", false),
+			LogRequestQuery:    util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_QUERY", false),
+			LogResponseBody:    util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_RESPONSE_BODY", false),
+			LogResponseHeader:  util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER", false),
+			PrettyPrintConsole: util.GetEnvAsBool("INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE", false),
+		},
 	}
 }
diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go
index 0e66c99..2ede71f 100644
--- a/internal/api/templates/routes.go
+++ b/internal/api/templates/routes.go
@@ -9,5 +9,9 @@ func InitRoutes(s *api.Server) {
 	g.PUT("/:hash", putFinalizeTemplate(s))
 	g.DELETE("/:hash", deleteDiscardTemplate(s))
 	g.GET("/:hash/tests", getTestDatabase(s))
-	g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s))
+	g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead
+
+	g.POST("/:hash/tests/:id/recreate", postRecreateTestDatabase(s))
+	g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s))
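+
+	// Typical client flow against these routes (illustrative):
+	//   GET  /:hash/tests              -> obtain (lock) a ready test database
+	//   POST /:hash/tests/:id/unlock   -> return it to the pool unchanged
+	//   POST /:hash/tests/:id/recreate -> drop and recreate it from its template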
+
 }
diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go
index 3dacac5..bc10c84 100644
--- a/internal/api/templates/templates.go
+++ b/internal/api/templates/templates.go
@@ -1,13 +1,13 @@
 package templates
 
 import (
-	"context"
+	"errors"
 	"net/http"
 	"strconv"
-	"time"
 
 	"github.com/allaboutapps/integresql/internal/api"
 	"github.com/allaboutapps/integresql/pkg/manager"
+	"github.com/allaboutapps/integresql/pkg/pool"
 	"github.com/labstack/echo/v4"
 )
 
@@ -27,19 +27,16 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc {
 			return echo.NewHTTPError(http.StatusBadRequest, "hash is required")
 		}
 
-		ctx, cancel := context.WithTimeout(c.Request().Context(), 30*time.Second)
-		defer cancel()
-
-		template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash)
+		template, err := s.Manager.InitializeTemplateDatabase(c.Request().Context(), payload.Hash)
 		if err != nil {
-			switch err {
-			case manager.ErrManagerNotReady:
+			if errors.Is(err, manager.ErrManagerNotReady) {
 				return echo.ErrServiceUnavailable
-			case manager.ErrTemplateAlreadyInitialized:
+			} else if errors.Is(err, manager.ErrTemplateAlreadyInitialized) {
 				return echo.NewHTTPError(http.StatusLocked, "template is already initialized")
-			default:
-				return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 			}
+
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 		}
 
 		return c.JSON(http.StatusOK, &template)
@@ -50,18 +47,18 @@ func putFinalizeTemplate(s *api.Server) echo.HandlerFunc {
 	return func(c echo.Context) error {
 		hash := c.Param("hash")
 
-		ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second)
-		defer cancel()
-
-		if _, err := s.Manager.FinalizeTemplateDatabase(ctx, hash); err != nil {
-			switch err {
-			case manager.ErrManagerNotReady:
+		if _, err := s.Manager.FinalizeTemplateDatabase(c.Request().Context(), hash); err != nil {
+			if errors.Is(err, manager.ErrTemplateAlreadyInitialized) {
+				// template is initialized, we ignore this error
+				return c.NoContent(http.StatusNoContent)
+			} else if errors.Is(err, manager.ErrManagerNotReady) {
 				return echo.ErrServiceUnavailable
-			case manager.ErrTemplateNotFound:
+			} else if errors.Is(err, manager.ErrTemplateNotFound) {
 				return echo.NewHTTPError(http.StatusNotFound, "template not found")
-			default:
-				return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 			}
+
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 		}
 
 		return c.NoContent(http.StatusNoContent)
@@ -72,18 +69,15 @@ func deleteDiscardTemplate(s *api.Server) echo.HandlerFunc {
 	return func(c echo.Context) error {
 		hash := c.Param("hash")
 
-		ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second)
-		defer cancel()
-
-		if err := s.Manager.DiscardTemplateDatabase(ctx, hash); err != nil {
-			switch err {
-			case manager.ErrManagerNotReady:
+		if err := s.Manager.DiscardTemplateDatabase(c.Request().Context(), hash); err != nil {
+			if errors.Is(err, manager.ErrManagerNotReady) {
 				return echo.ErrServiceUnavailable
-			case manager.ErrTemplateNotFound:
+			} else if errors.Is(err, manager.ErrTemplateNotFound) {
 				return echo.NewHTTPError(http.StatusNotFound, "template not found")
-			default:
-				return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 			}
+
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 		}
 
 		return c.NoContent(http.StatusNoContent)
@@ -91,31 +85,35 @@ func deleteDiscardTemplate(s *api.Server) echo.HandlerFunc {
 }
 
 func getTestDatabase(s *api.Server) echo.HandlerFunc {
+
 	return func(c echo.Context) error {
 		hash := c.Param("hash")
 
-		ctx, cancel := context.WithTimeout(c.Request().Context(), 1*time.Minute)
-		defer cancel()
-
-		test, err := s.Manager.GetTestDatabase(ctx, hash)
+		test, err := s.Manager.GetTestDatabase(c.Request().Context(), hash)
 		if err != nil {
-			switch err {
-			case manager.ErrManagerNotReady:
+
+			if errors.Is(err, manager.ErrManagerNotReady) {
 				return echo.ErrServiceUnavailable
-			case manager.ErrTemplateNotFound:
+			} else if errors.Is(err, manager.ErrTemplateNotFound) {
 				return echo.NewHTTPError(http.StatusNotFound, "template not found")
-			case manager.ErrDatabaseDiscarded:
+			} else if errors.Is(err, manager.ErrTemplateDiscarded) {
 				return echo.NewHTTPError(http.StatusGone, "template was just discarded")
-			default:
-				return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 			}
+
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 		}
 
 		return c.JSON(http.StatusOK, &test)
 	}
 }
 
+// deprecated
 func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc {
+	return postUnlockTestDatabase(s)
+}
+
+func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc {
 	return func(c echo.Context) error {
 		hash := c.Param("hash")
 		id, err := strconv.Atoi(c.Param("id"))
@@ -123,20 +121,47 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc {
 			return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID")
 		}
 
-		ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second)
-		defer cancel()
+		if err := s.Manager.ReturnTestDatabase(c.Request().Context(), hash, id); err != nil {
+			if errors.Is(err, manager.ErrManagerNotReady) {
+				return echo.ErrServiceUnavailable
+			} else if errors.Is(err, manager.ErrTemplateNotFound) {
+				return echo.NewHTTPError(http.StatusNotFound, "template not found")
+			} else if errors.Is(err, manager.ErrTestNotFound) {
+				return echo.NewHTTPError(http.StatusNotFound, "test database not found")
+			} else if errors.Is(err, pool.ErrTestDBInUse) {
+				return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error())
+			}
 
-		if err := s.Manager.ReturnTestDatabase(ctx, hash, id); err != nil {
-			switch err {
-			case manager.ErrManagerNotReady:
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
+		}
+
+		return c.NoContent(http.StatusNoContent)
+	}
+}
+
+func postRecreateTestDatabase(s *api.Server) echo.HandlerFunc {
+	return func(c echo.Context) error {
+		hash := c.Param("hash")
+		id, err := strconv.Atoi(c.Param("id"))
+		if err != nil {
+			return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID")
+		}
+
+		if err := s.Manager.RecreateTestDatabase(c.Request().Context(), hash, id); err != nil {
+
+			if errors.Is(err, manager.ErrManagerNotReady) {
 				return echo.ErrServiceUnavailable
-			case manager.ErrTemplateNotFound:
+			} else if errors.Is(err, manager.ErrTemplateNotFound) {
 				return echo.NewHTTPError(http.StatusNotFound, "template not found")
-			case manager.ErrTestNotFound:
+			} else if errors.Is(err, manager.ErrTestNotFound) {
 				return echo.NewHTTPError(http.StatusNotFound, "test database not found")
-			default:
-				return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
+			} else if errors.Is(err, pool.ErrTestDBInUse) {
+				return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error())
 			}
+
+			// default 500
+			return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
 		}
 
 		return c.NoContent(http.StatusNoContent)
diff --git a/internal/config/build_args.go b/internal/config/build_args.go
new file mode 100644
index 0000000..7972bc2
--- /dev/null
+++ b/internal/config/build_args.go
@@ -0,0 +1,18 @@
+package config
+
+import "fmt"
+
+// The following vars are automatically injected via -ldflags.
+// See Makefile target "make go-build" and make var $(LDFLAGS).
+// No need to change them here.
+// https://www.digitalocean.com/community/tutorials/using-ldflags-to-set-version-information-for-go-applications
+var (
+	ModuleName = "build.local/misses/ldflags"               // e.g. "allaboutapps.dev/aw/go-starter"
+	Commit     = "< 40 chars git commit hash via ldflags >" // e.g. "59cb7684dd0b0f38d68cd7db657cb614feba8f7e"
+	BuildDate  = "1970-01-01T00:00:00+00:00"                // e.g. "1970-01-01T00:00:00+00:00"
+)
+
+// GetFormattedBuildArgs returns a string representation of the build args set via ldflags: "<ModuleName> @ <Commit> (<BuildDate>)"
+func GetFormattedBuildArgs() string {
+	return fmt.Sprintf("%v @ %v (%v)", ModuleName, Commit, BuildDate)
+}
diff --git a/internal/router/echo_logger.go b/internal/router/echo_logger.go
new file mode 100644
index 0000000..0931855
--- /dev/null
+++ b/internal/router/echo_logger.go
@@ -0,0 +1,13 @@
+package router
+
+import "github.com/rs/zerolog"
+
+type echoLogger struct {
+	level zerolog.Level
+	log   zerolog.Logger
+}
+
+func (l *echoLogger) Write(p []byte) (n int, err error) {
+	l.log.WithLevel(l.level).Msgf("%s", p)
+	return len(p), nil
+}
diff --git a/internal/router/router.go b/internal/router/router.go
index e3df24c..0ff08c3 100644
--- a/internal/router/router.go
+++ b/internal/router/router.go
@@ -1,24 +1,76 @@
 package router
 
 import (
+	"net/http"
+
 	"github.com/allaboutapps/integresql/internal/api"
 	"github.com/allaboutapps/integresql/internal/api/admin"
+	"github.com/allaboutapps/integresql/internal/api/middleware"
 	"github.com/allaboutapps/integresql/internal/api/templates"
 	"github.com/labstack/echo/v4"
 	echoMiddleware "github.com/labstack/echo/v4/middleware"
+	"github.com/rs/zerolog/log"
 )
 
 func Init(s *api.Server) {
 	s.Echo = echo.New()
 
-	s.Echo.Debug = false
+	s.Echo.Debug = s.Config.Echo.Debug
 	s.Echo.HideBanner = true
+	s.Echo.Logger.SetOutput(&echoLogger{level: s.Config.Logger.RequestLevel, log: log.With().Str("component", "echo").Logger()})
+
+	// ---
+	// General middleware
+	if s.Config.Echo.EnableTrailingSlashMiddleware {
+		s.Echo.Pre(echoMiddleware.RemoveTrailingSlash())
+	} else {
+		log.Warn().Msg("Disabling trailing slash middleware due to environment config")
+	}
+
+	if s.Config.Echo.EnableRecoverMiddleware {
+		s.Echo.Use(echoMiddleware.Recover())
+	} else {
+		log.Warn().Msg("Disabling recover middleware due to environment config")
+	}
+
+	if s.Config.Echo.EnableRequestIDMiddleware {
+		s.Echo.Use(echoMiddleware.RequestID())
+	} else {
+		log.Warn().Msg("Disabling request ID middleware due to environment config")
+	}
+
+	if s.Config.Echo.EnableLoggerMiddleware {
+		s.Echo.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
+			Level:             s.Config.Logger.RequestLevel,
+			LogRequestBody:    s.Config.Logger.LogRequestBody,
+			LogRequestHeader:  s.Config.Logger.LogRequestHeader,
+			LogRequestQuery:   s.Config.Logger.LogRequestQuery,
+			LogResponseBody:   s.Config.Logger.LogResponseBody,
+			LogResponseHeader: s.Config.Logger.LogResponseHeader,
+			RequestBodyLogSkipper: func(req *http.Request) bool {
+				return middleware.DefaultRequestBodyLogSkipper(req)
+			},
+			ResponseBodyLogSkipper: func(req *http.Request, res *echo.Response) bool {
+				return middleware.DefaultResponseBodyLogSkipper(req, res)
+			},
+			Skipper: func(c echo.Context) bool {
+				return false
+			},
+		}))
+	} else {
+		log.Warn().Msg("Disabling logger middleware due to environment config")
+	}
 
-	s.Echo.Pre(echoMiddleware.RemoveTrailingSlash())
+	if s.Config.Echo.EnableTimeoutMiddleware {
+		s.Echo.Use(echoMiddleware.TimeoutWithConfig(echoMiddleware.TimeoutConfig{
+			Timeout: s.Config.Echo.RequestTimeout,
+		}))
+	}
 
-	s.Echo.Use(echoMiddleware.Recover())
-	s.Echo.Use(echoMiddleware.RequestID())
-	s.Echo.Use(echoMiddleware.Logger())
+	// enable debug endpoints only if requested
+	if s.Config.DebugEndpoints {
+		s.Echo.GET("/debug/*", echo.WrapHandler(http.DefaultServeMux))
+	}
 
 	admin.InitRoutes(s)
 	templates.InitRoutes(s)
diff --git a/internal/router/router_test.go b/internal/router/router_test.go
new file mode 100644
index 0000000..c393096
--- /dev/null
+++ b/internal/router/router_test.go
@@ -0,0 +1,35 @@
+package router_test
+
+import (
+	"testing"
+
+	"github.com/allaboutapps/integresql/internal/api"
+	"github.com/allaboutapps/integresql/internal/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPprofEnabledNoAuth(t *testing.T) {
+	config := api.DefaultServerConfigFromEnv()
+
+	// these are typically our default values, however we force-set them here to ensure they are set during test execution.
+	config.DebugEndpoints = true
+
+	test.WithTestServerConfigurable(t, config, func(s *api.Server) {
+		res := test.PerformRequest(t, s, "GET", "/debug/pprof/heap/", nil, nil)
+		require.Equal(t, 200, res.Result().StatusCode)
+
+		// index
+		res = test.PerformRequest(t, s, "GET", "/debug/pprof/", nil, nil)
+		require.Equal(t, 301, res.Result().StatusCode)
+	})
+}
+
+func TestPprofDisabled(t *testing.T) {
+	config := api.DefaultServerConfigFromEnv()
+	config.DebugEndpoints = false
+
+	test.WithTestServerConfigurable(t, config, func(s *api.Server) {
+		res := test.PerformRequest(t, s, "GET", "/debug/pprof/heap", nil, nil)
+		require.Equal(t, 404, res.Result().StatusCode)
+	})
+}
diff --git a/internal/test/helper_request.go b/internal/test/helper_request.go
new file mode 100644
index 0000000..7693358
--- /dev/null
+++ b/internal/test/helper_request.go
@@ -0,0 +1,132 @@
+package test
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/allaboutapps/integresql/internal/api"
+	"github.com/labstack/echo/v4"
+)
+
+type GenericPayload map[string]interface{}
+type GenericArrayPayload []interface{}
+
+func (g GenericPayload) Reader(t *testing.T) *bytes.Reader {
+	t.Helper()
+
+	b, err := json.Marshal(g)
+	if err != nil {
+		t.Fatalf("failed to serialize payload: %v", err)
+	}
+
+	return bytes.NewReader(b)
+}
+
+func (g GenericArrayPayload) Reader(t *testing.T) *bytes.Reader {
+	t.Helper()
+
+	b, err := json.Marshal(g)
+	if err != nil {
+		t.Fatalf("failed to serialize payload: %v", err)
+	}
+
+	return bytes.NewReader(b)
+}
+
+func PerformRequestWithParams(t *testing.T, s *api.Server, method string, path string, body GenericPayload, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder {
+	t.Helper()
+
+	if body == nil {
+		return PerformRequestWithRawBody(t, s, method, path, nil, headers, queryParams)
+	}
+
+	return PerformRequestWithRawBody(t, s, method, path, body.Reader(t), headers, queryParams)
+}
+
+func PerformRequestWithArrayAndParams(t *testing.T, s *api.Server, method string, path string, body GenericArrayPayload, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder {
+	t.Helper()
+
+	if body == nil {
+		return PerformRequestWithRawBody(t, s, method, path, nil, headers, queryParams)
+	}
+
+	return PerformRequestWithRawBody(t, s, method, path, body.Reader(t), headers, queryParams)
+}
+
+func PerformRequestWithRawBody(t *testing.T, s *api.Server, method string, path string, body io.Reader, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder {
+	t.Helper()
+
+	req := httptest.NewRequest(method, path, body)
+
+	if headers != nil {
+		req.Header = headers
+	}
+	if body != nil && len(req.Header.Get(echo.HeaderContentType)) == 0 {
+		req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
+	}
+
+	if queryParams != nil {
+		q := req.URL.Query()
+		for k, v := range queryParams {
+			q.Add(k, v)
+		}
+
+		req.URL.RawQuery = q.Encode()
+	}
+
+	res := httptest.NewRecorder()
+
+	s.Echo.ServeHTTP(res, req)
+
+	return res
+}
+
+func PerformRequest(t *testing.T, s *api.Server, method string, path string, body GenericPayload, headers http.Header) *httptest.ResponseRecorder {
+	t.Helper()
+
+	return PerformRequestWithParams(t, s, method, path, body, headers, nil)
+}
+
+func PerformRequestWithArray(t *testing.T, s *api.Server, method string, path string, body GenericArrayPayload, headers http.Header) *httptest.ResponseRecorder {
+	t.Helper()
+
+	return PerformRequestWithArrayAndParams(t, s, method, path, body, headers, nil)
+}
+
+func ParseResponseBody(t *testing.T, res *httptest.ResponseRecorder, v interface{}) {
+	t.Helper()
+
+	if err := json.NewDecoder(res.Result().Body).Decode(&v); err != nil {
+		t.Fatalf("Failed to parse response body: %v", err)
+	}
+}
+
+// func ParseResponseAndValidate(t *testing.T, res *httptest.ResponseRecorder, v runtime.Validatable) {
+// 	t.Helper()
+
+// 	ParseResponseBody(t, res, &v)
+
+// 	if err := v.Validate(strfmt.Default); err != nil {
+// 		t.Fatalf("Failed to validate response: %v", err)
+// 	}
+// }
+
+func HeadersWithAuth(t *testing.T, token string) http.Header {
+	t.Helper()
+
+	return HeadersWithConfigurableAuth(t, "Bearer", token)
+}
+
+func HeadersWithConfigurableAuth(t *testing.T, scheme string, token string) http.Header {
+	t.Helper()
+
+	headers := http.Header{}
+	headers.Set(echo.HeaderAuthorization, fmt.Sprintf("%s %s", scheme, token))
+
+	return headers
+}
diff --git a/internal/test/test_server.go b/internal/test/test_server.go
new file mode 100644
index 0000000..45c108c
--- /dev/null
+++ b/internal/test/test_server.go
@@ -0,0 +1,62 @@
+package test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/allaboutapps/integresql/internal/api"
+	"github.com/allaboutapps/integresql/internal/router"
+)
+
+// WithTestServer returns a fully configured server (using the default server config).
+func WithTestServer(t *testing.T, closure func(s *api.Server)) {
+	t.Helper()
+	defaultConfig := api.DefaultServerConfigFromEnv()
+	WithTestServerConfigurable(t, defaultConfig, closure)
+}
+
+// WithTestServerConfigurable returns a fully configured server, allowing for configuration using the provided server config.
+func WithTestServerConfigurable(t *testing.T, config api.ServerConfig, closure func(s *api.Server)) {
+	t.Helper()
+	ctx := context.Background()
+	WithTestServerConfigurableContext(ctx, t, config, closure)
+}
+
+// WithTestServerConfigurableContext returns a fully configured server, allowing for configuration using the provided server config.
+// The provided context will be used during setup (instead of the default background context).
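+//
+// An illustrative test using these helpers might look as follows (sketch only,
+// mirroring router_test.go; the /debug/pprof route requires DebugEndpoints):
+//
+//	func TestExample(t *testing.T) {
+//		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//		defer cancel()
+//
+//		config := api.DefaultServerConfigFromEnv()
+//		config.DebugEndpoints = true
+//
+//		test.WithTestServerConfigurableContext(ctx, t, config, func(s *api.Server) {
+//			res := test.PerformRequest(t, s, "GET", "/debug/pprof/heap/", nil, nil)
+//			require.Equal(t, 200, res.Result().StatusCode)
+//		})
+//	}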
+func WithTestServerConfigurableContext(ctx context.Context, t *testing.T, config api.ServerConfig, closure func(s *api.Server)) {
+	t.Helper()
+	execClosureNewTestServer(ctx, t, config, closure)
+
+}
+
+// Executes closure on a new test server
+func execClosureNewTestServer(ctx context.Context, t *testing.T, config api.ServerConfig, closure func(s *api.Server)) {
+	t.Helper()
+
+	// https://stackoverflow.com/questions/43424787/how-to-use-next-available-port-in-http-listenandserve
+	// You may use port 0 to indicate you're not specifying an exact port but you want a free, available port selected by the system
+	config.Address = ":0"
+
+	s := api.NewServer(config)
+
+	if err := s.InitManager(ctx); err != nil {
+		t.Fatalf("failed to start manager: %v", err)
+	}
+
+	router.Init(s)
+
+	closure(s)
+
+	// echo is managed and should close automatically after running the test
+	if err := s.Echo.Shutdown(ctx); err != nil {
+		t.Fatalf("failed to shutdown server: %v", err)
+	}
+
+	if err := s.Manager.Disconnect(ctx, true); err != nil {
+		t.Fatalf("failed to shutdown manager: %v", err)
+	}
+
+	// disallow any further refs to managed object after running the test
+	s = nil
+}
diff --git a/pkg/db/database.go b/pkg/db/database.go
new file mode 100644
index 0000000..9cc0df9
--- /dev/null
+++ b/pkg/db/database.go
@@ -0,0 +1,16 @@
+package db
+
+type Database struct {
+	TemplateHash string         `json:"templateHash"`
+	Config       DatabaseConfig `json:"config"`
+}
+
+type TestDatabase struct {
+	Database `json:"database"`
+
+	ID int `json:"id"`
+}
+
+type TemplateDatabase struct {
+	Database `json:"database"`
+}
diff --git a/pkg/manager/database_config.go b/pkg/db/database_config.go
similarity index 98%
rename from pkg/manager/database_config.go
rename to pkg/db/database_config.go
index 4c351ae..8a3ef7f 100644
--- a/pkg/manager/database_config.go
+++ b/pkg/db/database_config.go
@@ -1,4 +1,4 @@
-package manager
+package db
 
 import (
 	"fmt"
diff --git a/pkg/manager/database_config_test.go b/pkg/db/database_config_internal_test.go
similarity index 99%
rename from pkg/manager/database_config_test.go
rename to pkg/db/database_config_internal_test.go
index 5d6c4fc..b81b209 100644
--- a/pkg/manager/database_config_test.go
+++ b/pkg/db/database_config_internal_test.go
@@ -1,4 +1,4 @@
-package manager
+package db
 
 import (
 	"encoding/json"
diff --git a/pkg/manager/database.go b/pkg/manager/database.go
deleted file mode 100644
index b83b4ca..0000000
--- a/pkg/manager/database.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package manager
-
-import (
-	"context"
-	"errors"
-	"sync"
-)
-
-type databaseState int
-
-const (
-	databaseStateInit      databaseState = iota
-	databaseStateDiscarded databaseState = iota
-	databaseStateReady     databaseState = iota
-)
-
-var ErrDatabaseDiscarded = errors.New("ErrDatabaseDiscarded")
-
-type Database struct {
-	sync.RWMutex `json:"-"`
-
-	TemplateHash string         `json:"templateHash"`
-	Config       DatabaseConfig `json:"config"`
-
-	state databaseState
-	c     chan struct{}
-}
-
-func (d *Database) State() databaseState {
-	d.RLock()
-	defer d.RUnlock()
-
-	return d.state
-}
-
-func (d *Database) Ready() bool {
-	d.RLock()
-	defer d.RUnlock()
-
-	return d.state == databaseStateReady
-}
-
-func (d *Database) WaitUntilReady(ctx context.Context) error {
-
-	state := d.State()
-
-	if state == databaseStateReady {
-		return nil
-	} else if state == databaseStateDiscarded {
-		return ErrDatabaseDiscarded
-	}
-
-	for {
-		select {
-		case <-d.c:
-			state := d.State()
-
-			if state == databaseStateReady {
-				return nil
-			} else if state == databaseStateDiscarded {
-				return ErrDatabaseDiscarded
-			}
-
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-	}
-}
-
-func (d *Database) FlagAsReady() {
-
-	state := d.State()
-	if state != databaseStateInit {
-		return
-	}
-
-	d.Lock()
-	defer d.Unlock()
-
-	d.state = databaseStateReady
-
-	if d.c != nil {
-		close(d.c)
-	}
-}
-
-func (d *Database) FlagAsDiscarded() {
-
-	state := d.State()
-	if state != databaseStateInit {
-		return
-	}
-
-	d.Lock()
-	defer d.Unlock()
-
-	d.state = databaseStateDiscarded
-
-	if d.c != nil {
-		close(d.c)
-	}
-}
diff --git a/pkg/manager/testing.go b/pkg/manager/helpers_test.go
similarity index 71%
rename from pkg/manager/testing.go
rename to pkg/manager/helpers_test.go
index 92111e3..00baa06 100644
--- a/pkg/manager/testing.go
+++ b/pkg/manager/helpers_test.go
@@ -1,38 +1,57 @@
-package manager
+package manager_test
 
 import (
 	"context"
 	"database/sql"
 	"errors"
-	"sync"
 	"testing"
 	"time"
+
+	"github.com/allaboutapps/integresql/pkg/db"
+	"github.com/allaboutapps/integresql/pkg/manager"
+	"github.com/allaboutapps/integresql/pkg/util"
 )
 
-func testManagerFromEnv() *Manager {
-	conf := DefaultManagerConfigFromEnv()
+func testManagerFromEnv() *manager.Manager {
+	conf := manager.DefaultManagerConfigFromEnv()
+	conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently
+	m, _ := manager.New(conf)
+	return m
+}
+
+func testManagerFromEnvWithConfig() (*manager.Manager, manager.ManagerConfig) {
+	conf := manager.DefaultManagerConfigFromEnv()
+	conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently
+	return manager.New(conf)
+}
+
+func testManagerWithConfig(conf manager.ManagerConfig) (*manager.Manager, manager.ManagerConfig) {
 	conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently
-	return New(conf)
+	return manager.New(conf)
 }
 
 // test helpers should never return errors, but are passed the *testing.T instance and fail if needed. It seems to be recommended that helper functions are moved to a testing.go file...
 // https://medium.com/@povilasve/go-advanced-tips-tricks-a872503ac859
 // https://about.sourcegraph.com/go/advanced-testing-in-go
-func disconnectManager(t *testing.T, m *Manager) {
+func disconnectManager(t *testing.T, m *manager.Manager) {
 	t.Helper()
 
+	timeout := 1 * time.Second
+	ctx := context.Background()
 
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
-	defer cancel()
+	_, err := util.WaitWithTimeout(ctx, timeout, func(ctx context.Context) (bool, error) {
+		err := m.Disconnect(ctx, true)
+		return false, err
+	})
 
-	if err := m.Disconnect(ctx, true); err != nil {
-		t.Logf("received error while disconnecting manager: %v", err)
+	if err != nil {
+		t.Errorf("received error while disconnecting manager: %v", err)
 	}
+
 }
 
-func initTemplateDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) {
-	defer wg.Done()
+func initTemplateDB(_ context.Context, errs chan<- error, m *manager.Manager) {
 
 	template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash")
 	if err != nil {
@@ -40,15 +59,15 @@ func initTemplateDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) {
 		return
 	}
 
-	if template.Ready() {
-		errs <- errors.New("template database is marked as ready")
+	if template.TemplateHash != "hashinghash" {
+		errs <- errors.New("template database is invalid")
 		return
 	}
 
 	errs <- nil
 }
 
-func populateTemplateDB(t *testing.T, template *TemplateDatabase) {
+func populateTemplateDB(t *testing.T, template db.TemplateDatabase) {
 	t.Helper()
 
 	db, err := sql.Open("postgres", template.Config.ConnectionString())
@@ -106,7 +125,7 @@ func populateTemplateDB(t *testing.T, template *TemplateDatabase) {
 	}
 }
 
-func verifyTestDB(t *testing.T, test *TestDatabase) {
+func verifyTestDB(t *testing.T, test db.TestDatabase) {
 	t.Helper()
 
 	db, err := sql.Open("postgres", test.Config.ConnectionString())
@@ -140,22 +159,8 @@ func verifyTestDB(t *testing.T, test *TestDatabase) {
 	}
 }
 
-func getTestDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) {
-	defer wg.Done()
+func getTestDB(_ context.Context, errs chan<- error, m *manager.Manager) {
 
-	db, err := m.GetTestDatabase(context.Background(), "hashinghash")
-	if err != nil {
-		errs <- err
-		return
-	}
-
-	if !db.Ready() {
-		errs <- errors.New("test database is marked as not ready")
-		return
-	}
-	if !db.Dirty() {
-		errs <- errors.New("test database is not marked as dirty")
-	}
-
-	errs <- nil
+	_, err := m.GetTestDatabase(context.Background(), "hashinghash")
+	errs <- err
 }
diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go
index 3b24275..554ab52 100644
--- a/pkg/manager/manager.go
+++ b/pkg/manager/manager.go
@@ -3,13 +3,19 @@ package manager
 import (
 	"context"
 	"database/sql"
+	"encoding/json"
 	"errors"
 	"fmt"
-	"sort"
-	"sync"
-	"time"
+	"runtime/trace"
+	"strings"
 
+	"github.com/allaboutapps/integresql/pkg/db"
+	"github.com/allaboutapps/integresql/pkg/pool"
+	"github.com/allaboutapps/integresql/pkg/templates"
+	"github.com/allaboutapps/integresql/pkg/util"
 	"github.com/lib/pq"
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
 )
 
 var (
@@ -17,84 +23,125 @@ var (
 	ErrTemplateAlreadyInitialized = errors.New("template is already initialized")
 	ErrTemplateNotFound           = errors.New("template not found")
 	ErrTestNotFound               = errors.New("test database not found")
+	ErrTemplateDiscarded          = errors.New("template is discarded, can't be used")
+	ErrInvalidTemplateState       = errors.New("unexpected template state")
 )
 
 type Manager struct {
-	config        ManagerConfig
-	db            *sql.DB
-	templates     map[string]*TemplateDatabase
-	templateMutex sync.RWMutex
-	wg            sync.WaitGroup
+	config ManagerConfig
+	db     *sql.DB
+
+	templates *templates.Collection
+	pool      *pool.PoolCollection
 }
 
-func New(config ManagerConfig) *Manager {
-	m := &Manager{
-		config:    config,
-		db:        nil,
-		templates: map[string]*TemplateDatabase{},
-		wg:        sync.WaitGroup{},
+func New(config ManagerConfig) (*Manager, ManagerConfig) {
+
+	var testDBPrefix string
+	if config.DatabasePrefix != "" {
+		testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.DatabasePrefix)
 	}
+	if config.PoolConfig.TestDBNamePrefix != "" {
+		testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.PoolConfig.TestDBNamePrefix)
+	}
+
+	config.PoolConfig.TestDBNamePrefix = testDBPrefix
 
-	if len(m.config.TestDatabaseOwner) == 0 {
-		m.config.TestDatabaseOwner = m.config.ManagerDatabaseConfig.Username
+	if len(config.TestDatabaseOwner) == 0 {
+		config.TestDatabaseOwner = config.ManagerDatabaseConfig.Username
 	}
 
-	if len(m.config.TestDatabaseOwnerPassword) == 0 {
-		m.config.TestDatabaseOwnerPassword = m.config.ManagerDatabaseConfig.Password
+	if len(config.TestDatabaseOwnerPassword) == 0 {
+		config.TestDatabaseOwnerPassword = config.ManagerDatabaseConfig.Password
 	}
 
-	if m.config.TestDatabaseInitialPoolSize > m.config.TestDatabaseMaxPoolSize && m.config.TestDatabaseMaxPoolSize > 0 {
-		m.config.TestDatabaseInitialPoolSize = m.config.TestDatabaseMaxPoolSize
+	// at least one test database needs to be present initially
+	if config.PoolConfig.InitialPoolSize == 0 {
+		config.PoolConfig.InitialPoolSize = 1
 	}
 
-	return m
+	if config.PoolConfig.InitialPoolSize > config.PoolConfig.MaxPoolSize && config.PoolConfig.MaxPoolSize > 0 {
+		config.PoolConfig.InitialPoolSize = config.PoolConfig.MaxPoolSize
+	}
+
+	if config.PoolConfig.MaxParallelTasks < 1 {
+		config.PoolConfig.MaxParallelTasks = 1
+	}
+
+	// debug log final derived config
+	c, err := json.Marshal(config)
+
+	if err != nil {
+		log.Fatal().Err(err).Msg("Failed to marshal the env")
+	}
+
+	log.Debug().RawJSON("config", c).Msg("manager.New")
+
+	m := &Manager{
+		config:    config,
+		db:        nil,
+		templates: templates.NewCollection(),
+		pool:      pool.NewPoolCollection(config.PoolConfig),
+	}
+
+	return m, m.config
 }
 
 func DefaultFromEnv() *Manager {
-	return New(DefaultManagerConfigFromEnv())
+	m, _ := New(DefaultManagerConfigFromEnv())
+	return m
 }
 
 func (m *Manager) Connect(ctx context.Context) error {
+
+	log := m.getManagerLogger(ctx, "Connect")
+
 	if m.db != nil {
-		return errors.New("manager is already connected")
+		err := errors.New("manager is already connected")
+		log.Error().Err(err).Send()
+		return err
 	}
 
 	db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString())
 	if err != nil {
+		log.Error().Err(err).Msg("unable to connect")
 		return err
 	}
 
 	if err := db.PingContext(ctx); err != nil {
+		log.Error().Err(err).Msg("unable to ping")
 		return err
 	}
 
 	m.db = db
 
+	log.Debug().Msg("connected.")
+
 	return nil
 }
 
 func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error {
-	if m.db == nil {
-		return errors.New("manager is not connected")
-	}
 
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		m.wg.Wait()
-	}()
+	log := m.getManagerLogger(ctx, "Disconnect").With().Bool("ignoreCloseError", ignoreCloseError).Logger()
 
-	select {
-	case <-c:
-	case <-ctx.Done():
+	if m.db == nil {
+		err := errors.New("manager is not connected")
+		log.Error().Err(err).Send()
+		return err
 	}
 
+	// stop the pool before closing DB connection
+	m.pool.Stop()
+
 	if err := m.db.Close(); err != nil && !ignoreCloseError {
+		log.Error().Err(err).Send()
 		return err
 	}
 
 	m.db = nil
 
+	log.Warn().Msg("disconnected.")
+
 	return nil
 }
 
@@ -106,97 +153,139 @@ func (m *Manager) Reconnect(ctx context.Context, ignoreDisconnectError bool) err
 	return m.Connect(ctx)
 }
 
-func (m *Manager) Ready() bool {
+func (m Manager) Ready() bool {
 	return m.db != nil
 }
 
+func (m Manager) Config() ManagerConfig {
+	return m.config
+}
+
 func (m *Manager) Initialize(ctx context.Context) error {
+
+	log := m.getManagerLogger(ctx, "Initialize")
+
 	if !m.Ready() {
 		if err := m.Connect(ctx); err != nil {
+			log.Error().Err(err).Send()
 			return err
 		}
 	}
 
-	rows, err := m.db.QueryContext(ctx, "SELECT datname FROM pg_database WHERE datname LIKE $1", fmt.Sprintf("%s_%s_%%", m.config.DatabasePrefix, m.config.TestDatabasePrefix))
+	rows, err := m.db.QueryContext(ctx, "SELECT datname FROM pg_database WHERE datname LIKE $1", fmt.Sprintf("%s_%s_%%", m.config.DatabasePrefix, m.config.PoolConfig.TestDBNamePrefix))
 	if err != nil {
+		log.Error().Err(err).Send()
 		return err
 	}
 	defer rows.Close()
 
+	log.Debug().Msg("Dropping unmanaged dbs...")
+
 	for rows.Next() {
 		var dbName string
 		if err := rows.Scan(&dbName); err != nil {
 			return err
 		}
 
+		log.Warn().Str("dbName", dbName).Msg("Dropping...")
+
 		if _, err := m.db.Exec(fmt.Sprintf("DROP DATABASE %s", pq.QuoteIdentifier(dbName))); err != nil {
+			log.Error().Str("dbName", dbName).Err(err).Send()
 			return err
 		}
 	}
 
+	log.Info().Msg("initialized.")
+
 	return nil
 }
 
-func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) {
+func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) {
+	ctx, task := trace.NewTask(ctx, "initialize_template_db")
+
+	log := m.getManagerLogger(ctx, "InitializeTemplateDatabase").With().Str("hash", hash).Logger()
+
+	defer task.End()
+
 	if !m.Ready() {
-		return nil, ErrManagerNotReady
+		log.Error().Msg("not ready")
+		return db.TemplateDatabase{}, ErrManagerNotReady
+	}
+
+	dbName := m.makeTemplateDatabaseName(hash)
+	templateConfig := templates.TemplateConfig{
+		DatabaseConfig: db.DatabaseConfig{
+			Host:     m.config.ManagerDatabaseConfig.Host,
+			Port:     m.config.ManagerDatabaseConfig.Port,
+			Username: m.config.ManagerDatabaseConfig.Username,
+			Password: m.config.ManagerDatabaseConfig.Password,
+			Database: dbName,
+		},
 	}
 
-	m.templateMutex.Lock()
-	defer m.templateMutex.Unlock()
-
-	_, ok := m.templates[hash]
+	added, unlock := m.templates.Push(ctx, hash, templateConfig)
+	// unlock template collection only after the template is actually initialized in the DB
+	defer unlock()
 
-	if ok {
-		// fmt.Println("initialized!", ok)
-		return nil, ErrTemplateAlreadyInitialized
+	if !added {
+		return db.TemplateDatabase{}, ErrTemplateAlreadyInitialized
 	}
 
-	// fmt.Println("initializing...", ok)
+	reg := trace.StartRegion(ctx, "drop_and_create_db")
+	if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil {
 
-	dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash)
-	template := &TemplateDatabase{
-		Database: Database{
-			TemplateHash: hash,
-			Config: DatabaseConfig{
-				Host:     m.config.ManagerDatabaseConfig.Host,
-				Port:     m.config.ManagerDatabaseConfig.Port,
-				Username: m.config.ManagerDatabaseConfig.Username,
-				Password: m.config.ManagerDatabaseConfig.Password,
-				Database: dbName,
-			},
-			state: databaseStateInit,
-			c:     make(chan struct{}),
-		},
-		nextTestID:    0,
-		testDatabases: make([]*TestDatabase, 0),
+		log.Error().Err(err).Msg("triggering unsafe remove after dropAndCreateDatabase failed...")
+		m.templates.RemoveUnsafe(ctx, hash)
+
+		return db.TemplateDatabase{}, err
 	}
+	reg.End()
 
-	m.templates[hash] = template
+	// if template config has been overwritten, the existing pool needs to be removed
+	err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB)
+	if err != nil && !errors.Is(err, pool.ErrUnknownHash) {
 
-	if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil {
-		delete(m.templates, hash)
-		// m.templates[hash] = nil
+		log.Error().Err(err).Msg("triggering unsafe remove after RemoveAllWithHash failed...")
+		m.templates.RemoveUnsafe(ctx, hash)
 
-		return nil, err
+		return db.TemplateDatabase{}, err
 	}
 
-	return template, nil
+	return db.TemplateDatabase{
+		Database: db.Database{
+			TemplateHash: hash,
+			Config:       templateConfig.DatabaseConfig,
+		},
+	}, nil
 }
 
-func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error {
+func (m Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error {
+
+	ctx, task := trace.NewTask(ctx, "discard_template_db")
+	log := m.getManagerLogger(ctx, "DiscardTemplateDatabase").With().Str("hash", hash).Logger()
+
+	defer task.End()
 
 	if !m.Ready() {
+		log.Error().Msg("not ready")
 		return ErrManagerNotReady
 	}
 
-	m.templateMutex.Lock()
-	defer m.templateMutex.Unlock()
+	// first remove all DB with this hash
+	if err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB); err != nil && !errors.Is(err, pool.ErrUnknownHash) {
+		log.Error().Err(err).Msg("failed to remove all test DBs with this hash")
+		return err
+	}
+
+	template, found := m.templates.Pop(ctx, hash)
+	dbName := template.Config.Database
 
-	template, ok := m.templates[hash]
+	if !found {
+		// even if a template is not found in the collection, it might still exist in the DB
 
-	if !ok {
-		dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash)
+		log.Warn().Msg("template not found, checking for existence...")
+
+		dbName = m.makeTemplateDatabaseName(hash)
 		exists, err := m.checkDatabaseExists(ctx, dbName)
 		if err != nil {
 			return err
@@ -205,232 +294,194 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro
 
 		if !exists {
 			return ErrTemplateNotFound
 		}
+	} else {
+		template.SetState(ctx, templates.TemplateStateDiscarded)
 	}
 
-	// discard any still waiting dbs.
-	template.FlagAsDiscarded()
+	log.Debug().Msg("found template database, dropping...")
 
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	if err := template.WaitUntilReady(ctx); err != nil {
-		cancel()
-	}
-	cancel()
+	return m.dropDatabase(ctx, dbName)
+}
 
-	// m.templates[hash] = nil
-	delete(m.templates, hash)
+func (m Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) {
+	ctx, task := trace.NewTask(ctx, "finalize_template_db")
 
-	return nil
-}
+	log := m.getManagerLogger(ctx, "FinalizeTemplateDatabase").With().Str("hash", hash).Logger()
+
+	defer task.End()
 
-func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) {
 	if !m.Ready() {
-		return nil, ErrManagerNotReady
+		log.Error().Msg("not ready")
+		return db.TemplateDatabase{}, ErrManagerNotReady
 	}
 
-	m.templateMutex.Lock()
-	defer m.templateMutex.Unlock()
-
-	template, ok := m.templates[hash]
-
-	// We don't allow finalizing NEVER initialized database by integresql!
-	if !ok {
-		return nil, ErrTemplateNotFound
+	template, found := m.templates.Get(ctx, hash)
+	if !found {
+		log.Error().Msg("bailout: template not found")
+		return db.TemplateDatabase{}, ErrTemplateNotFound
 	}
 
-	state := template.State()
+	state, lockedTemplate := template.GetStateWithLock(ctx)
+	defer lockedTemplate.Unlock()
 
 	// early bailout if we are already ready (multiple calls)
-	if state == databaseStateReady {
-		return template, nil
+	if state == templates.TemplateStateFinalized {
+		log.Warn().Msg("bailout: template already finalized")
+		return db.TemplateDatabase{Database: template.Database}, ErrTemplateAlreadyInitialized
 	}
 
 	// Disallow transition from discarded to ready
-	if state == databaseStateDiscarded {
-		return nil, ErrDatabaseDiscarded
+	if state == templates.TemplateStateDiscarded {
+		log.Error().Msg("bailout: template discarded!")
+		return db.TemplateDatabase{}, ErrTemplateDiscarded
 	}
 
-	template.FlagAsReady()
+	// Init a pool with this hash
+	log.Trace().Msg("init hash pool...")
+	m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB)
 
-	m.wg.Add(1)
-	go m.addTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize)
+	lockedTemplate.SetState(ctx, templates.TemplateStateFinalized)
 
-	return template, nil
+	log.Debug().Msg("Template database finalized successfully.")
+	return db.TemplateDatabase{Database: template.Database}, nil
 }
 
-func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (*TestDatabase, error) {
+// GetTestDatabase tries to get a ready test DB from an existing pool.
+func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) {
+	ctx, task := trace.NewTask(ctx, "get_test_db")
+
+	log := m.getManagerLogger(ctx, "GetTestDatabase").With().Str("hash", hash).Logger()
+
+	defer task.End()
+
 	if !m.Ready() {
-		return nil, ErrManagerNotReady
+		log.Error().Msg("not ready")
+		return db.TestDatabase{}, ErrManagerNotReady
 	}
 
-	m.templateMutex.RLock()
-	template, ok := m.templates[hash]
-	m.templateMutex.RUnlock()
-
-	if !ok {
-		return nil, ErrTemplateNotFound
+	template, found := m.templates.Get(ctx, hash)
+	if !found {
+		return db.TestDatabase{}, ErrTemplateNotFound
 	}
 
-	if err := template.WaitUntilReady(ctx); err != nil {
-		return nil, err
+	// if the template has been discarded/not initialized yet,
+	// no DB should be returned, even if already in the pool
+	state := template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout)
+	if state != templates.TemplateStateFinalized {
+		return db.TestDatabase{}, ErrInvalidTemplateState
 	}
 
-	template.Lock()
-	defer template.Unlock()
+	ctx, task = trace.NewTask(ctx, "get_with_timeout")
+	testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout)
+	task.End()
+	if errors.Is(err, pool.ErrUnknownHash) {
+		// Template exists, but the pool is not there -
+		// it must have been removed.
+		// It needs to be reinitialized.
+		log.Warn().Err(err).Msg("ErrUnknownHash, going to InitHashPool and recursively calling us again...")
+		m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB)
 
-	var testDB *TestDatabase
-	for _, db := range template.testDatabases {
-		if db.ReadyForTest() {
-			testDB = db
-			break
-		}
+		testDB, err = m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout)
 	}
 
-	if testDB == nil {
-		var err error
-		testDB, err = m.createNextTestDatabase(ctx, template)
-		if err != nil {
-			return nil, err
-		}
+	if err != nil {
+		return db.TestDatabase{}, err
 	}
 
-	testDB.FlagAsDirty()
-
-	m.wg.Add(1)
-	go m.addTestDatabasesInBackground(template, 1)
-
 	return testDB, nil
 }
 
-func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error {
+// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it).
+func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error {
+	ctx, task := trace.NewTask(ctx, "return_test_db")
+	defer task.End()
+
 	if !m.Ready() {
 		return ErrManagerNotReady
 	}
 
-	m.templateMutex.RLock()
-	template, ok := m.templates[hash]
-	m.templateMutex.RUnlock()
-
-	if !ok {
+	// check if the template exists and is finalized
+	template, found := m.templates.Get(ctx, hash)
+	if !found {
 		return ErrTemplateNotFound
 	}
 
-	if err := template.WaitUntilReady(ctx); err != nil {
-		return err
+	if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) !=
+		templates.TemplateStateFinalized {
+
+		return ErrInvalidTemplateState
 	}
 
-	template.Lock()
-	defer template.Unlock()
+	// template is ready, we can return unchanged testDB to the pool
+	return m.pool.ReturnTestDatabase(ctx, hash, id)
+}
 
-	found := false
-	for _, db := range template.testDatabases {
-		if db.ID == id {
-			found = true
-			db.FlagAsClean()
-			break
-		}
+
+// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool.
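+// While connections to the test database are still open, recreation may fail with pool.ErrTestDBInUse, which the HTTP layer maps to 423 Locked.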
+func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) error {
+	ctx, task := trace.NewTask(ctx, "recreate_test_db")
+	defer task.End()
+
+	if !m.Ready() {
 		return ErrManagerNotReady
 	}
 
+	// check if the template exists and is finalized
+	template, found := m.templates.Get(ctx, hash)
 	if !found {
-		dbName := fmt.Sprintf("%s_%s_%s_%03d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash, id)
-		exists, err := m.checkDatabaseExists(ctx, dbName)
-		if err != nil {
-			return err
-		}
-
-		if !exists {
-			return ErrTestNotFound
-		}
-
-		db := &TestDatabase{
-			Database: Database{
-				TemplateHash: hash,
-				Config: DatabaseConfig{
-					Host:     m.config.ManagerDatabaseConfig.Host,
-					Port:     m.config.ManagerDatabaseConfig.Port,
-					Username: m.config.TestDatabaseOwner,
-					Password: m.config.TestDatabaseOwnerPassword,
-					Database: dbName,
-				},
-				state: databaseStateReady,
-				c:     make(chan struct{}),
-			},
-			ID:    id,
-			dirty: false,
-		}
+		return ErrTemplateNotFound
+	}
 
-		template.testDatabases = append(template.testDatabases, db)
-		sort.Sort(ByID(template.testDatabases))
+	if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) !=
+		templates.TemplateStateFinalized {
+		return ErrInvalidTemplateState
 	}
 
-	return nil
+	// template is ready, we can return the testDB to the pool and have it cleaned up
+	return m.pool.RecreateTestDatabase(ctx, hash, id)
 }
 
-func (m *Manager) ClearTrackedTestDatabases(hash string) error {
+func (m Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error {
+
+	log := m.getManagerLogger(ctx, "ClearTrackedTestDatabases").With().Str("hash", hash).Logger()
+
 	if !m.Ready() {
+		log.Error().Msg("not ready")
 		return ErrManagerNotReady
 	}
 
-	m.templateMutex.RLock()
-	template, ok := m.templates[hash]
-	m.templateMutex.RUnlock()
+	log.Warn().Msg("clearing...")
 
-	if !ok {
+	err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB)
+	if errors.Is(err, pool.ErrUnknownHash) {
 		return ErrTemplateNotFound
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	if err := template.WaitUntilReady(ctx); err != nil {
-		cancel()
-		return err
-	}
-	cancel()
-
-	template.Lock()
-	defer template.Unlock()
-
-	for i := range template.testDatabases {
-		template.testDatabases[i] = nil
-	}
+	return err
+}
 
-	template.testDatabases = make([]*TestDatabase, 0)
-	template.nextTestID = 0
+func (m Manager) ResetAllTracking(ctx context.Context) error {
 
-	return nil
-}
+	log := m.getManagerLogger(ctx, "ResetAllTracking")
 
-func (m *Manager) ResetAllTracking() error {
 	if !m.Ready() {
+		log.Error().Msg("not ready")
 		return ErrManagerNotReady
 	}
 
-	m.templateMutex.Lock()
-	defer m.templateMutex.Unlock()
+	log.Warn().Msg("resetting...")
 
-	for hash := range m.templates {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		if err := m.templates[hash].WaitUntilReady(ctx); err != nil {
-			cancel()
-			continue
-		}
-		cancel()
-
-		m.templates[hash].Lock()
-		for i := range m.templates[hash].testDatabases {
-			m.templates[hash].testDatabases[i] = nil
-		}
-		m.templates[hash].Unlock()
-
-		delete(m.templates, hash)
-		// m.templates[hash] = nil
-	}
+	// remove all templates to disallow any new test DB creation from existing templates
+	m.templates.RemoveAll(ctx)
 
-	m.templates = map[string]*TemplateDatabase{}
-
-	return nil
+	return m.pool.RemoveAll(ctx, m.dropTestPoolDB)
 }
 
-func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) {
+func (m Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) {
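+	// the query yields a row only when the database exists, so sql.ErrNoRows below maps to "does not exist"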
 	var exists bool
+
+	log := m.getManagerLogger(ctx, "checkDatabaseExists")
+	log.Trace().Msgf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName)
+
 	if err := m.db.QueryRowContext(ctx, "SELECT 1 AS exists FROM pg_database WHERE datname = $1", dbName).Scan(&exists); err != nil {
 		if err == sql.ErrNoRows {
 			return false, nil
@@ -442,104 +493,92 @@ func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool,
 		return false, err
 	}
 
 	return exists, nil
 }
 
-func (m *Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error {
+func (m Manager) checkDatabaseConnected(ctx context.Context, dbName string) (bool, error) {
 
-	// ts := time.Now()
-	// fmt.Println("createDatabase", dbName, ts)
+	var countConnected int
 
-	if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil {
-		return err
+	if err := m.db.QueryRowContext(ctx, "SELECT count(pid) FROM pg_stat_activity WHERE datname = $1", dbName).Scan(&countConnected); err != nil {
+		if err == sql.ErrNoRows {
+			return false, nil
+		}
+
+		return false, err
 	}
 
-	return nil
+	if countConnected > 0 {
+		return true, nil
+	}
+
+	return false, nil
 }
 
-func (m *Manager) dropDatabase(ctx context.Context, dbName string) error {
+func (m Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error {
 
-	// ts := time.Now()
-	// fmt.Println("dropDatabase", dbName, ts)
+	defer trace.StartRegion(ctx, "create_db").End()
 
-	if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil {
+	log := m.getManagerLogger(ctx, "createDatabase")
+	log.Trace().Msgf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))
+
+	if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner string, template string) error {
-	if err := m.dropDatabase(ctx, dbName); err != nil {
+func (m Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase, templateName string) error {
+
+	connected, err := m.checkDatabaseConnected(ctx, testDB.Database.Config.Database)
+
+	if err != nil {
 		return err
 	}
 
-	return m.createDatabase(ctx, dbName, owner, template)
+	if connected {
+		return pool.ErrTestDBInUse
+	}
+
+	return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName)
 }
 
-// Creates a new test database for the template and increments the next ID.
-// ! ATTENTION: this function assumes `template` has already been LOCKED by its caller and will NOT synchronize access again !
-// The newly created database object is returned as well as added to the template's DB list automatically.
-func (m *Manager) createNextTestDatabase(ctx context.Context, template *TemplateDatabase) (*TestDatabase, error) {
-	dbName := fmt.Sprintf("%s_%s_%s_%03d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, template.TemplateHash, template.nextTestID)
-
-	if err := m.dropAndCreateDatabase(ctx, dbName, m.config.TestDatabaseOwner, template.Config.Database); err != nil {
-		return nil, err
-	}
-
-	testDB := &TestDatabase{
-		Database: Database{
-			TemplateHash: template.TemplateHash,
-			Config: DatabaseConfig{
-				Host:     m.config.ManagerDatabaseConfig.Host,
-				Port:     m.config.ManagerDatabaseConfig.Port,
-				Username: m.config.TestDatabaseOwner,
-				Password: m.config.TestDatabaseOwnerPassword,
-				Database: dbName,
-			},
-			state: databaseStateReady,
-			c:     make(chan struct{}),
-		},
-		ID:    template.nextTestID,
-		dirty: false,
-	}
+func (m Manager) dropTestPoolDB(ctx context.Context, testDB db.TestDatabase) error {
+	return m.dropDatabase(ctx, testDB.Config.Database)
+}
 
-	template.testDatabases = append(template.testDatabases, testDB)
-	template.nextTestID++
+func (m Manager) dropDatabase(ctx context.Context, dbName string) error {
 
-	if template.nextTestID > m.config.TestDatabaseMaxPoolSize {
-		i := 0
-		for idx, db := range template.testDatabases {
-			if db.Dirty() {
-				i = idx
-				break
-			}
-		}
+	defer trace.StartRegion(ctx, "drop_db").End()
 
-		if err := m.dropDatabase(ctx, template.testDatabases[i].Config.Database); err != nil {
-			return nil, err
-		}
+	log := m.getManagerLogger(ctx, "dropDatabase")
+	log.Trace().Msgf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName))
 
-		// Delete while preserving order, avoiding memory leaks due to pointers, in accordance with: https://github.com/golang/go/wiki/SliceTricks
-		if i < len(template.testDatabases)-1 {
-			copy(template.testDatabases[i:], template.testDatabases[i+1:])
+	if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil {
+		if strings.Contains(err.Error(), "is being accessed by other users") {
+			return pool.ErrTestDBInUse
 		}
-		template.testDatabases[len(template.testDatabases)-1] = nil
-		template.testDatabases = template.testDatabases[:len(template.testDatabases)-1]
+
+		return err
 	}
 
-	return testDB, nil
+	return nil
 }
 
-// Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown.
-// This function will lock `template` until all requested test DBs have been created and signal the WaitGroup about completion afterwards.
-func (m *Manager) addTestDatabasesInBackground(template *TemplateDatabase, count int) {
-	defer m.wg.Done()
+func (m Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner string, template string) error {
+	if !m.Ready() {
+		return ErrManagerNotReady
+	}
+
+	if err := m.dropDatabase(ctx, dbName); err != nil {
+		return err
+	}
 
-	template.Lock()
-	defer template.Unlock()
+	return m.createDatabase(ctx, dbName, owner, template)
+}
 
-	ctx := context.Background()
+func (m Manager) makeTemplateDatabaseName(hash string) string {
+	return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash)
+}
 
-	for i := 0; i < count; i++ {
-		// TODO log error somewhere instead of silently swallowing it?
-		_, _ = m.createNextTestDatabase(ctx, template)
-	}
+func (m Manager) getManagerLogger(ctx context.Context, managerFunction string) zerolog.Logger {
+	return util.LogFromContext(ctx).With().Str("managerFn", managerFunction).Logger()
 }
diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go
index e1da06e..a37a13e 100644
--- a/pkg/manager/manager_config.go
+++ b/pkg/manager/manager_config.go
@@ -1,27 +1,34 @@
 package manager
 
 import (
+	"runtime"
+	"time"
+
+	"github.com/allaboutapps/integresql/pkg/db"
+	"github.com/allaboutapps/integresql/pkg/pool"
 	"github.com/allaboutapps/integresql/pkg/util"
 )
 
-type ManagerConfig struct {
-	ManagerDatabaseConfig DatabaseConfig
+// we explicitly want to access this struct via manager.ManagerConfig, thus we disable revive for the next line
+type ManagerConfig struct { //nolint:revive
+	ManagerDatabaseConfig db.DatabaseConfig `json:"-"` // sensitive
 
 	TemplateDatabaseTemplate string
 
-	DatabasePrefix              string
-	TemplateDatabasePrefix      string
-	TestDatabasePrefix          string
-	TestDatabaseOwner           string
-	TestDatabaseOwnerPassword   string
-	TestDatabaseInitialPoolSize int
-	TestDatabaseMaxPoolSize     int
+	DatabasePrefix            string
+	TemplateDatabasePrefix    string
+	TestDatabaseOwner         string
+	TestDatabaseOwnerPassword string        `json:"-"` // sensitive
+	TemplateFinalizeTimeout   time.Duration // Time to wait for a template to transition into the 'finalized' state
+	TestDatabaseGetTimeout    time.Duration // Time to wait for a ready database
+
+	PoolConfig pool.PoolConfig
 }
 
 func DefaultManagerConfigFromEnv() ManagerConfig {
 	return ManagerConfig{
 
-		ManagerDatabaseConfig: DatabaseConfig{
+		ManagerDatabaseConfig: db.DatabaseConfig{
 
 			Host: util.GetEnv("INTEGRESQL_PGHOST", util.GetEnv("PGHOST", "127.0.0.1")),
 			Port: util.GetEnvAsInt("INTEGRESQL_PGPORT", util.GetEnvAsInt("PGPORT", 5432)),
@@ -43,13 +50,23 @@ func DefaultManagerConfigFromEnv() ManagerConfig {
 		// DatabasePrefix_TemplateDatabasePrefix_HASH
 		TemplateDatabasePrefix: util.GetEnv("INTEGRESQL_TEMPLATE_DB_PREFIX", "template"),
 
-		// DatabasePrefix_TestDatabasePrefix_HASH_ID
-		TestDatabasePrefix: util.GetEnv("INTEGRESQL_TEST_DB_PREFIX", "test"),
+		// we reuse the same user (PGUSER) and password (PGPASSWORD) for the test / template databases by default
+		TestDatabaseOwner:         util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))),
+		TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))),
 
-		// reuse the same user (PGUSER) and passwort (PGPASSWORT) for the test / template databases by default
-		TestDatabaseOwner:           util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))),
-		TestDatabaseOwnerPassword:   util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))),
-		TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10),
-		TestDatabaseMaxPoolSize:     util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500),
+		// typically these timeouts should be the same as INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS
+		// see internal/api/server_config.go
+		TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/))),
+		TestDatabaseGetTimeout:  time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/))),
+
+		PoolConfig: pool.PoolConfig{
+			InitialPoolSize:                   util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()),   // previously default 10
+			MaxPoolSize:                       util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4),     // previously default 500
+			TestDBNamePrefix:                  util.GetEnv("INTEGRESQL_TEST_DB_PREFIX", "test"),                          // DatabasePrefix_TestDBNamePrefix_HASH_ID
+			MaxParallelTasks:                  util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()),
+			TestDatabaseRetryRecreateSleepMin: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS", 250 /*250 ms*/)),
+			TestDatabaseRetryRecreateSleepMax: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS", 1000*3 /*3 sec*/)),
+			TestDatabaseMinimalLifetime:       time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS", 250 /*250 ms*/)),
+		},
 	}
 }
diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go
index ea0f3a0..8765fd2 100644
--- a/pkg/manager/manager_test.go
+++ b/pkg/manager/manager_test.go
@@ -1,4 +1,4 @@
-package manager
+package manager_test
 
 import (
 	"context"
@@ -9,7 +9,12 @@ import (
 	"testing"
 	"time"
 
+	"github.com/allaboutapps/integresql/pkg/db"
+	"github.com/allaboutapps/integresql/pkg/manager"
 	"github.com/lib/pq"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
 )
 
 func TestManagerConnect(t *testing.T) {
@@ -30,8 +35,8 @@ func TestManagerConnect(t *testing.T) {
 func TestManagerConnectError(t *testing.T) {
 	t.Parallel()
 
-	m := New(ManagerConfig{
-		ManagerDatabaseConfig: DatabaseConfig{
+	m, _ := manager.New(manager.ManagerConfig{
+		ManagerDatabaseConfig: db.DatabaseConfig{
 			Host:     "definitelydoesnotexist",
 			Port:     2345,
 			Username: "definitelydoesnotexist",
@@ -106,12 +111,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) {
 		t.Fatalf("failed to initialize template database: %v", err)
 	}
 
-	if template.Ready() {
-		t.Error("template database is marked as ready")
-	}
-	if template.TemplateHash != hash {
-		t.Errorf("template has not set correctly, got %q, want %q", template.TemplateHash, hash)
-	}
+	assert.Equal(t, hash, template.TemplateHash)
 }
 
 func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) {
@@ -129,7 +129,7 @@ func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) {
 	defer cancel()
 
 	_, err := m.InitializeTemplateDatabase(ctxt, hash)
-	if err != context.DeadlineExceeded {
+	if !errors.Is(err, context.DeadlineExceeded) {
 		t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded)
 	}
 }
@@ -151,7 +151,10 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) {
 	wg.Add(templateDBCount)
 
 	for i := 0; i < templateDBCount; i++ {
-		go initTemplateDB(&wg, errs, m)
+		go func() {
+			defer wg.Done()
+			initTemplateDB(ctx, errs, m)
+		}()
 	}
 
 	wg.Wait()
@@ -170,7 +173,7 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) {
 		if err == nil {
 			success++
 		} else {
-			if err == ErrTemplateAlreadyInitialized {
+			if errors.Is(err, manager.ErrTemplateAlreadyInitialized) {
 				failed++
 			} else {
 				errored++
@@ -213,22 +216,22 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) {
 		t.Fatalf("failed to finalize template database: %v", err)
 	}
 
-	if !template.Ready() {
-		t.Error("template database is flagged as not ready")
+	if template.TemplateHash != hash {
+		t.Error("invalid template hash")
 	}
 }
 
 func TestManagerFinalizeUntrackedTemplateDatabaseIsNotPossible(t *testing.T) {
 	ctx := context.Background()
 
-	m := testManagerFromEnv()
+	m, config := testManagerFromEnvWithConfig()
 	if err := m.Initialize(ctx); err != nil {
 		t.Fatalf("initializing manager failed: %v", err)
 	}
 
 	defer disconnectManager(t, m)
 
-	db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString())
+	db, err := sql.Open("postgres", config.ManagerDatabaseConfig.ConnectionString())
 	if err != nil {
 		t.Fatalf("failed to open connection to manager database: %v", err)
 	}
@@ -239,12 +242,12 @@ func TestManagerFinalizeUntrackedTemplateDatabaseIsNotPossible(t *testing.T) {
 	}
 
 	hash := "hashinghash"
-	dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash)
+	dbName := fmt.Sprintf("%s_%s_%s", config.DatabasePrefix, config.TemplateDatabasePrefix, hash)
 
 	if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil {
 		t.Fatalf("failed to manually drop template database %q: %v", dbName, err)
 	}
-	if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(m.config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(m.config.TemplateDatabaseTemplate))); err != nil {
+	if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(config.TemplateDatabaseTemplate))); err != nil {
 		t.Fatalf("failed to manually create template database %q: %v", dbName, err)
 	}
 
@@ -299,49 +302,54 @@ func TestManagerGetTestDatabase(t *testing.T) {
 		t.Fatalf("failed to get test database: %v", err)
 	}
 
-	if !test.Ready() {
-		t.Error("test database is flagged not ready")
-	}
-
 	verifyTestDB(t, test)
 }
 
-// disabled as we were running into timing issues
-// func TestManagerGetTestDatabaseTimeout(t *testing.T) {
-// 	ctx := context.Background()
+func TestManagerGetTestDatabaseExtendPool(t *testing.T) {
+	ctx := context.Background()
 
-// 	m := testManagerFromEnv()
-// 	if err := m.Initialize(ctx); err != nil {
-// 		t.Fatalf("initializing manager failed: %v", err)
-// 	}
+	cfg := manager.DefaultManagerConfigFromEnv()
+	cfg.TestDatabaseGetTimeout = 300 * time.Millisecond
+	cfg.PoolConfig.InitialPoolSize = 0 // this will automatically be raised to 1 during init
+	cfg.PoolConfig.MaxPoolSize = 10
+	m, _ := testManagerWithConfig(cfg)
 
-// 	defer disconnectManager(t, m)
+	if err := m.Initialize(ctx); err != nil {
+		t.Fatalf("initializing manager failed: %v", err)
+	}
 
-// 	hash := "hashinghash"
+	defer disconnectManager(t, m)
 
-// 	template, err := m.InitializeTemplateDatabase(ctx, hash)
-// 	if err != nil {
-// 		t.Fatalf("failed to initialize template database: %v", err)
-// 	}
+	hash := "hashinghash"
 
-// 	populateTemplateDB(t, template)
+	template, err := m.InitializeTemplateDatabase(ctx, hash)
+	if err != nil {
+		t.Fatalf("failed to initialize template database: %v", err)
+	}
 
-// 	if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil {
-// 		t.Fatalf("failed to finalize template database: %v", err)
-// 	}
+	populateTemplateDB(t, template)
 
-// 	ctxt, cancel := context.WithTimeout(ctx, 10*time.Nanosecond)
-// 	defer cancel()
+	if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil {
+		t.Fatalf("failed to finalize template database: %v", err)
	}
 
-// 	if _, err := m.GetTestDatabase(ctxt, hash); err != context.DeadlineExceeded {
-// 		t.Fatalf("received unexpected error, got %v, want %v", 
err, context.DeadlineExceeded) -// } -// } + previousID := -1 + // assert than one by one pool will be extended + for i := 0; i < cfg.PoolConfig.MaxPoolSize; i++ { + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.Equal(t, previousID+1, testDB.ID) + previousID = testDB.ID + } +} func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 1 * time.Second + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -355,62 +363,33 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { t.Fatalf("failed to initialize template database: %v", err) } - testCh := make(chan error, 1) - go func() { - test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - testCh <- err - return - } - - if !test.Ready() { - testCh <- errors.New("test database is flagged as not ready") - return - } - if !test.Dirty() { - testCh <- errors.New("test database is not flagged as dirty") - } + testCh := make(chan string, 2) - testCh <- nil - }() + g := errgroup.Group{} + g.Go(func() error { + _, err := m.GetTestDatabase(ctx, hash) + testCh <- "GET" + assert.NoError(t, err) + return nil + }) populateTemplateDB(t, template) - finalizeCh := make(chan error, 1) - go func() { + g.Go(func() error { time.Sleep(500 * time.Millisecond) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - finalizeCh <- err - } - - finalizeCh <- nil - }() - - testDone := false - finalizeDone := false - for { - select { - case err := <-testCh: - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } - - testDone = true - case err := <-finalizeCh: - if err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } - - finalizeDone = true - } + _, err := m.FinalizeTemplateDatabase(ctx, hash) + testCh <- "FINALIZE" + assert.NoError(t, err) + return nil + }) - if testDone && finalizeDone { - break - } else if testDone && !finalizeDone { - t.Fatal("getting test database completed before finalizing template database") - } + if err := g.Wait(); err != nil { + t.Fatal(err) } + + first := <-testCh + assert.Equal(t, "FINALIZE", first) } func TestManagerGetTestDatabaseConcurrently(t *testing.T) { @@ -443,7 +422,10 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } wg.Wait() @@ -476,7 +458,10 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { func TestManagerDiscardTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -499,7 +484,10 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { @@ -521,7 +509,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { if err == nil { success++ } else { - // fmt.Println(err) + // t.Log(err) errored++ } } @@ -538,7 +526,10 @@ func 
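The rewritten concurrency test here swaps the old hand-rolled select loop for errgroup plus a buffered tag channel: each goroutine pushes a tag on completion, and the assertion reduces to "FINALIZE arrives first". A minimal sketch of that ordering check, with the two callbacks as illustrative parameters:

package example

import (
	"time"

	"golang.org/x/sync/errgroup"
)

// firstFinished runs get and finalize concurrently and reports which completed first.
func firstFinished(get, finalize func() error) (string, error) {
	done := make(chan string, 2) // buffered: goroutines never block on send
	var g errgroup.Group

	g.Go(func() error {
		err := get() // expected to block until the template is finalized
		done <- "GET"
		return err
	})

	g.Go(func() error {
		time.Sleep(500 * time.Millisecond) // let GET start waiting first
		err := finalize()
		done <- "FINALIZE"
		return err
	})

	if err := g.Wait(); err != nil {
		return "", err
	}
	return <-done, nil // the test asserts this is "FINALIZE"
}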
TestManagerDiscardTemplateDatabase(t *testing.T) { func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -561,7 +552,10 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { @@ -583,7 +577,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { if err == nil { success++ } else { - // fmt.Println(err) + t.Log(err) errored++ } } @@ -611,14 +605,15 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { } -func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { +func TestManagerGetAndReturnTestDatabase(t *testing.T) { ctx := context.Background() - cfg := DefaultManagerConfigFromEnv() - cfg.TestDatabaseMaxPoolSize = 3 - cfg.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently + cfg := manager.DefaultManagerConfigFromEnv() + cfg.PoolConfig.InitialPoolSize = 3 + cfg.PoolConfig.MaxPoolSize = 3 + cfg.TestDatabaseGetTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) - m := New(cfg) if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -638,25 +633,29 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - seenIDs := map[int]bool{} - for i := 0; i <= cfg.TestDatabaseMaxPoolSize*3; i++ { + // request many more databases than initally added + for i := 0; i <= cfg.PoolConfig.MaxPoolSize*3; i++ { test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } + assert.NoError(t, err) + assert.NotEmpty(t, test) - if _, ok := seenIDs[test.ID]; ok { - t.Errorf("received already seen test database ID %d", test.ID) - } - - seenIDs[test.ID] = true + // return testDB after usage + assert.NoError(t, m.ReturnTestDatabase(ctx, hash, test.ID)) } + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } -func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { +func TestManagerGetAndRecreateTestDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.PoolConfig.InitialPoolSize = 8 + cfg.PoolConfig.MaxPoolSize = 8 + cfg.TestDatabaseGetTimeout = 1000 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -665,15 +664,62 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { hash := "hashinghash" - if _, err := m.GetTestDatabase(ctx, hash); err == nil { - t.Fatal("succeeded in getting test database for unknown template") + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + // request many more databases than initally added + for i := 0; i <= 
cfg.PoolConfig.MaxPoolSize*5; i++ { + test, err := m.GetTestDatabase(ctx, hash) + + t.Logf("open %v", test.ID) + + assert.NoError(t, err) + assert.NotEmpty(t, test) + + db, err := sql.Open("postgres", test.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + // assert that it's always initialized according to a template + var res int + assert.NoError(t, db.QueryRowContext(ctx, "SELECT COUNT(*) FROM pilots WHERE name = 'Anna'").Scan(&res)) + assert.Equal(t, 0, res, i) + + // make changes into test DB + _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('844a1a87-5ef7-4309-8814-0f1054751156', 'Anna', '2023-03-23 09:44:00.548', '2023-03-23 09:44:00.548');`) + require.NoError(t, err) + assert.NoError(t, db.QueryRowContext(ctx, "SELECT COUNT(*) FROM pilots WHERE name = 'Anna'").Scan(&res)) + assert.Equal(t, 1, res) + + t.Logf("close %v", test.ID) + db.Close() + + // recreate testDB after usage + assert.NoError(t, m.RecreateTestDatabase(ctx, hash, test.ID)) + } + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } -func TestManagerReturnTestDatabase(t *testing.T) { +func TestManagerGetTestDatabaseDontReturn(t *testing.T) { + ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.PoolConfig.InitialPoolSize = 5 + cfg.PoolConfig.MaxPoolSize = 5 + cfg.TestDatabaseGetTimeout = time.Second * 5 + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -682,7 +728,7 @@ func TestManagerReturnTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -693,31 +739,127 @@ func TestManagerReturnTestDatabase(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to get test database: %v", err) + var wg sync.WaitGroup + for i := 0; i < cfg.PoolConfig.MaxPoolSize*5; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + testDB, err := m.GetTestDatabase(ctx, hash) + require.NoError(t, err, i) + db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + assert.NoError(t, err) + + // keep an open DB connection for a while + time.Sleep(20 * time.Millisecond) + + // now disconnect + db.Close() + // don't return + }(i) } + wg.Wait() - if err := m.ReturnTestDatabase(ctx, hash, test.ID); err != nil { - t.Fatalf("failed to return test database: %v", err) + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) +} + +func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { + ctx := context.Background() + + m := testManagerFromEnv() + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) } - originalID := test.ID + defer disconnectManager(t, m) - test, err = m.GetTestDatabase(ctx, hash) + hash := "hashinghash" + + if _, err := m.GetTestDatabase(ctx, hash); err == nil { + t.Fatal("succeeded in getting test database for unknown template") + } +} + +func TestManagerReturnTestDatabase(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.PoolConfig.InitialPoolSize = 1 + cfg.PoolConfig.MaxPoolSize = 10 + cfg.TestDatabaseGetTimeout = 200 
* time.Millisecond + + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { - t.Fatalf("failed to get additional test database: %v", err) + t.Fatalf("failed to initialize template database: %v", err) } - if test.ID != originalID { - t.Fatalf("failed to reuse returned test database, got ID %d, want ID %d", test.ID, originalID) + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) } + + testDB1, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + // open the connection and modify the test DB + db, err := sql.Open("postgres", testDB1.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('777a1a87-5ef7-4309-8814-0f1054751177', 'Snufkin', '2023-07-13 09:44:00.548', '2023-07-13 09:44:00.548')`) + assert.NoError(t, err, testDB1.ID) + db.Close() + // finally return it + assert.NoError(t, m.ReturnTestDatabase(ctx, hash, testDB1.ID)) + + // regetting these databases is quite random. Let's try to get the same id again... + // on first GET call the pool has been extended + // we will get the newly created DB + testDB2, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + + // next in 'ready' channel should be the returned DB + testDB3, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + + // restored db + var targetConnectionString string + if testDB2.ID == testDB1.ID { + targetConnectionString = testDB2.Config.ConnectionString() + } else if testDB3.ID == testDB1.ID { + targetConnectionString = testDB3.Config.ConnectionString() + } else { + t.Fatal("We should have been able to get the previously returned database.") + } + + // assert that it hasn't been cleaned but just reused directly + db, err = sql.Open("postgres", targetConnectionString) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + row := db.QueryRowContext(ctx, "SELECT name FROM pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") + assert.NoError(t, row.Err()) + var name string + assert.NoError(t, row.Scan(&name)) + assert.Equal(t, "Snufkin", name) + db.Close() + } func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + m, config := testManagerFromEnvWithConfig() if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -737,7 +879,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString()) + db, err := sql.Open("postgres", config.ManagerDatabaseConfig.ConnectionString()) if err != nil { t.Fatalf("failed to open connection to manager database: %v", err) } @@ -748,17 +890,17 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { } id := 321 - dbName := fmt.Sprintf("%s_%s_%s_%d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash, id) + dbName := fmt.Sprintf("%s_%s_%s_%d", config.DatabasePrefix, config.PoolConfig.TestDBNamePrefix, hash, id) if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil 
{ t.Fatalf("failed to manually drop template database %q: %v", dbName, err) } - if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(m.config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(template.Config.Database))); err != nil { + if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(template.Config.Database))); err != nil { t.Fatalf("failed to manually create template database %q: %v", dbName, err) } - if err := m.ReturnTestDatabase(ctx, hash, id); err != nil { - t.Fatalf("failed to return manually created test database: %v", err) + if err := m.ReturnTestDatabase(ctx, hash, id); err == nil { + t.Fatalf("succeeded to return manually created test database: %v", err) // this should not work! } } @@ -811,19 +953,49 @@ func TestManagerMultiFinalize(t *testing.T) { populateTemplateDB(t, template) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) + var wg sync.WaitGroup + wg.Add(3) + + errChan := make(chan error, 3) + finalize := func(errChan chan<- error) { + t := t + _, err := m.FinalizeTemplateDatabase(ctx, hash) + if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { + errChan <- err + return + } + if err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } } + go func() { + defer wg.Done() + finalize(errChan) + }() + go func() { + defer wg.Done() + finalize(errChan) + }() + go func() { + defer wg.Done() + finalize(errChan) + }() + + wg.Wait() + + errCount := len(errChan) + assert.Equal(t, 2, errCount) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize a second time template database (bailout already ready): %v", err) - } } func TestManagerClearTrackedTestDatabases(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + // there are no db added in background + cfg.PoolConfig.InitialPoolSize = 0 + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -850,9 +1022,11 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { originalID := test.ID - if err := m.ClearTrackedTestDatabases(hash); err != nil { + // clear it twice - because why not + if err := m.ClearTrackedTestDatabases(ctx, hash); err != nil { t.Fatalf("failed to clear tracked test databases: %v", err) } + assert.ErrorIs(t, m.ClearTrackedTestDatabases(ctx, hash), manager.ErrTemplateNotFound) test, err = m.GetTestDatabase(ctx, hash) if err != nil { diff --git a/pkg/manager/template_database.go b/pkg/manager/template_database.go deleted file mode 100644 index 39bfc4c..0000000 --- a/pkg/manager/template_database.go +++ /dev/null @@ -1,8 +0,0 @@ -package manager - -type TemplateDatabase struct { - Database `json:"database"` - - nextTestID int - testDatabases []*TestDatabase -} diff --git a/pkg/manager/test_database.go b/pkg/manager/test_database.go deleted file mode 100644 index 1c0628b..0000000 --- a/pkg/manager/test_database.go +++ /dev/null @@ -1,40 +0,0 @@ -package manager - -type TestDatabase struct { - Database `json:"database"` - - ID int `json:"id"` - - dirty bool -} - -func (t *TestDatabase) Dirty() bool { - t.RLock() - defer t.RUnlock() - - return t.dirty -} - -func (t *TestDatabase) FlagAsDirty() { - t.Lock() 
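TestManagerMultiFinalize above uses a common counting pattern for racing goroutines: every loser of the race pushes the sentinel error into a buffered channel, and once the WaitGroup drains, the channel length equals the number of losers. A generic sketch of that pattern under illustrative names:

package example

import (
	"errors"
	"sync"
)

// raceCount runs op n times concurrently and counts how many calls
// failed with the expected "already done" sentinel error.
func raceCount(n int, op func() error, sentinel error) int {
	errChan := make(chan error, n) // buffered so senders never block
	var wg sync.WaitGroup

	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := op(); errors.Is(err, sentinel) {
				errChan <- err
			}
		}()
	}

	wg.Wait()
	return len(errChan) // TestManagerMultiFinalize expects n-1 here
}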
- defer t.Unlock()
-
- t.dirty = true
-}
-
-func (t *TestDatabase) FlagAsClean() {
- t.Lock()
- defer t.Unlock()
-
- t.dirty = false
-}
-
-func (t *TestDatabase) ReadyForTest() bool {
- return t.Ready() && !t.Dirty()
-}
-
-type ByID []*TestDatabase
-
-func (i ByID) Len() int { return len(i) }
-func (a ByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a ByID) Less(i, j int) bool { return a[i].ID < a[j].ID }
diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go
new file mode 100644
index 0000000..b34cae8
--- /dev/null
+++ b/pkg/pool/pool.go
@@ -0,0 +1,663 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "runtime/trace"
+ "sync"
+ "time"
+
+ "github.com/allaboutapps/integresql/pkg/db"
+ "github.com/allaboutapps/integresql/pkg/util"
+ "github.com/rs/zerolog"
+)
+
+var (
+ ErrPoolFull = errors.New("database pool is full")
+ ErrInvalidState = errors.New("database state is not valid for this operation")
+ ErrInvalidIndex = errors.New("invalid database index (id)")
+ ErrTimeout = errors.New("timeout when waiting for ready db")
+ ErrTestDBInUse = errors.New("test database is in use, close the connection before dropping")
+)
+
+type dbState int // Indicates a current DB state.
+
+const (
+ dbStateReady dbState = iota // Initialized according to a template and ready to be picked up.
+ dbStateDirty // Taken by a client and potentially currently in use.
+ dbStateRecreating // In the process of being recreated (to prevent concurrent cleans)
+)
+
+type existingDB struct {
+ state dbState
+ db.TestDatabase
+
+ // To prevent auto-cleans of a test database on the dirty channel directly after it was issued as ready,
+ // each test database gets a timestamp assigned after which auto-cleaning it is generally allowed (unlock
+ // and recreate do not respect this). This timeout is typically very low and should only need to be tweaked
+ // in scenarios in which the pool is overloaded by requests.
+ // Prefer to tweak InitialPoolSize (the always ready dbs) and MaxPoolSize instead if you have issues here.
+ blockAutoCleanDirtyUntil time.Time
+
+ // increased after each recreation, useful for sleepy recreating workers to check if we still operate on the same generation.
+ generation uint
+}
+
+type workerTask string
+
+const (
+ workerTaskStop = "STOP"
+ workerTaskExtend = "EXTEND"
+ workerTaskAutoCleanDirty = "CLEAN_DIRTY"
+)
+
+// HashPool holds a test DB pool for a certain hash. Each HashPool runs cleanup workers in the background.
+type HashPool struct {
+ dbs []existingDB
+ ready chan int // IDs of DBs initialized according to a template, ready to be picked up
+ dirty chan int // IDs of DBs that were given away and need to be recreated to be reused
+ recreating chan struct{} // tracks currently running recreating ops
+
+ recreateDB recreateTestDBFunc
+ templateDB db.Database
+ PoolConfig
+
+ sync.RWMutex
+ wg sync.WaitGroup
+
+ tasksChan chan workerTask
+ running bool
+ workerContext context.Context // the ctx all background workers will receive (nil if not yet started)
+}
+
+// NewHashPool creates a new hash pool with the given config.
+// It starts the workers that extend the pool in the background up to the requested initial number.
+func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { + + pool := &HashPool{ + dbs: make([]existingDB, 0, cfg.MaxPoolSize), + ready: make(chan int, cfg.MaxPoolSize), + dirty: make(chan int, cfg.MaxPoolSize), + recreating: make(chan struct{}, cfg.MaxPoolSize), + + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), + templateDB: templateDB, + PoolConfig: cfg, + + tasksChan: make(chan workerTask, cfg.MaxPoolSize+1), + running: false, + } + + return pool +} + +func (pool *HashPool) Start() { + + log := pool.getPoolLogger(context.Background(), "Start") + pool.Lock() + log.Debug().Msg("starting...") + + defer pool.Unlock() + + if pool.running { + log.Warn().Msg("bailout already running!") + return + } + + pool.running = true + + ctx, cancel := context.WithCancel(context.Background()) + pool.workerContext = ctx + + for i := 0; i < pool.InitialPoolSize; i++ { + pool.tasksChan <- workerTaskExtend + } + + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.controlLoop(ctx, cancel) + }() + + log.Info().Msg("started!") +} + +func (pool *HashPool) Stop() { + + log := pool.getPoolLogger(context.Background(), "Stop") + log.Debug().Msg("stopping...") + + pool.Lock() + if !pool.running { + log.Warn().Msg("bailout already stopped!") + return + } + pool.running = false + pool.Unlock() + + pool.tasksChan <- workerTaskStop + pool.wg.Wait() + pool.workerContext = nil + log.Warn().Msg("stopped!") +} + +func (pool *HashPool) GetTestDatabase(ctx context.Context, timeout time.Duration) (db db.TestDatabase, err error) { + var index int + + log := pool.getPoolLogger(ctx, "GetTestDatabase") + log.Trace().Msg("waiting for ready ID...") + + select { + case <-time.After(timeout): + err = ErrTimeout + log.Error().Err(err).Dur("timeout", timeout).Msg("timeout") + return + case <-ctx.Done(): + err = ctx.Err() + log.Warn().Err(err).Msg("ctx done") + return + case index = <-pool.ready: + } + + log = log.With().Int("id", index).Logger() + log.Trace().Msg("got ready testdatabase!") + + reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() + + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { + err = ErrInvalidIndex + log.Error().Err(err).Int("dbs", len(pool.dbs)).Msg("index out of bounds!") + return + } + + testDB := pool.dbs[index] + // sanity check, should never happen - we got this index from 'ready' channel + if testDB.state != dbStateReady { + err = ErrInvalidState + log.Error().Err(err).Msgf("testdatabase is not in ready state=%v!", testDB.state) + return + } + + // flag as dirty and block auto clean until + testDB.state = dbStateDirty + testDB.blockAutoCleanDirtyUntil = time.Now().Add(pool.TestDatabaseMinimalLifetime) + + pool.dbs[index] = testDB + pool.dirty <- index + + if len(pool.dbs) < pool.PoolConfig.MaxPoolSize { + log.Trace().Msg("push workerTaskExtend") + pool.tasksChan <- workerTaskExtend + } + + // we try to ensure that InitialPoolSize count is staying ready + // thus, we try to move the oldest dirty dbs into recreating with the workerTaskAutoCleanDirty + if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && (len(pool.ready)+len(pool.recreating)) < pool.InitialPoolSize { + log.Trace().Msg("push workerTaskAutoCleanDirty") + pool.tasksChan <- workerTaskAutoCleanDirty + } + + pool.unsafeTraceLogStats(log) + + return testDB.TestDatabase, nil +} + +func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan workerTask, MaxParallelTasks int) { + 
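GetTestDatabase above blocks on three events at once: the caller's timeout, context cancellation, and an index arriving on the ready channel. That three-way select carries the pool's blocking semantics; reduced to a standalone sketch:

package example

import (
	"context"
	"errors"
	"time"
)

var errTimeout = errors.New("timeout when waiting for ready db") // mirrors pool.ErrTimeout

// waitReady blocks until a DB index is ready, the timeout elapses, or ctx is canceled.
func waitReady(ctx context.Context, ready <-chan int, timeout time.Duration) (int, error) {
	select {
	case <-time.After(timeout):
		return -1, errTimeout
	case <-ctx.Done():
		return -1, ctx.Err()
	case index := <-ready:
		return index, nil
	}
}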
+ log := pool.getPoolLogger(ctx, "workerTaskLoop") + log.Debug().Msg("starting...") + + handlers := map[workerTask]func(ctx context.Context) error{ + workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), + workerTaskAutoCleanDirty: ignoreErrs(pool.autoCleanDirty, context.Canceled), + } + + // to limit the number of running goroutines. + var semaphore = make(chan struct{}, MaxParallelTasks) + + for task := range taskChan { + handler, ok := handlers[task] + if !ok { + log.Error().Msgf("invalid task: %s", task) + continue + } + + select { + case <-ctx.Done(): + log.Warn().Err(ctx.Err()).Msg("ctx done!") + return + case semaphore <- struct{}{}: + } + + pool.wg.Add(1) + go func(task workerTask) { + + defer func() { + pool.wg.Done() + <-semaphore + }() + + log.Debug().Msgf("task=%v", task) + + if err := handler(ctx); err != nil { + log.Error().Err(err).Msgf("task=%v FAILED!", task) + } + }(task) + + } +} + +func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc) { + + log := pool.getPoolLogger(ctx, "controlLoop") + log.Debug().Msg("starting...") + + defer cancel() + + workerTasksChan := make(chan workerTask, len(pool.tasksChan)) + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.workerTaskLoop(ctx, workerTasksChan, pool.MaxParallelTasks) + }() + + for task := range pool.tasksChan { + if task == workerTaskStop { + log.Debug().Msg("stopping...") + close(workerTasksChan) + cancel() + return + } + + select { + case workerTasksChan <- task: + default: + // don't wait until task can be added, + // be available to receive Stop message at any time + } + } +} + +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). +func (pool *HashPool) ReturnTestDatabase(ctx context.Context, id int) error { + + log := pool.getPoolLogger(ctx, "ReturnTestDatabase").With().Int("id", id).Logger() + log.Debug().Msg("returning...") + + pool.Lock() + defer pool.Unlock() + + if err := ctx.Err(); err != nil { + // client vanished + log.Warn().Err(err).Msg("bailout client vanished!") + return err + } + + if id < 0 || id >= len(pool.dbs) { + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state != dbStateDirty { + log.Warn().Int("dbs", len(pool.dbs)).Msgf("bailout invalid state=%v.", testDB.state) + return nil + } + + // directly change the state to 'ready' + testDB.state = dbStateReady + pool.dbs[id] = testDB + + // remove id from dirty and add it to ready channel + pool.excludeIDFromChannel(pool.dirty, id) + pool.ready <- id + + pool.unsafeTraceLogStats(log) + + return nil +} + +func (pool *HashPool) excludeIDFromChannel(ch chan int, excludeID int) { + + // The testDB identified by overgiven id may still in a specific channel (typically dirty). We want to exclude it. + // We need to explicitly remove it from there by filtering the current channel to a tmp channel. + // We finally close the tmp channel and flush it onto the specific channel again. + // The id is now no longer in the channel. + filtered := make(chan int, pool.MaxPoolSize) + + var id int + for loop := true; loop; { + select { + case id = <-ch: + if id != excludeID { + filtered <- id + } + default: + loop = false + break + } + } + + // filtered now has all filtered values without the above id, redirect the other ids back to the specific channel. + // close so we can range over it... 
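The task loop above caps concurrent handlers with a buffered channel used as a counting semaphore: acquiring a slot is a send, releasing it is a receive. The same pattern extracted into a standalone sketch with illustrative names:

package example

import "sync"

// runBounded executes tasks with at most maxParallel running concurrently.
func runBounded(tasks []func(), maxParallel int) {
	semaphore := make(chan struct{}, maxParallel)
	var wg sync.WaitGroup

	for _, task := range tasks {
		semaphore <- struct{}{} // blocks while maxParallel tasks are in flight
		wg.Add(1)
		go func(run func()) {
			defer func() {
				wg.Done()
				<-semaphore // release the slot
			}()
			run()
		}(task)
	}

	wg.Wait()
}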
+ close(filtered) + + for id := range filtered { + ch <- id + } +} + +// RecreateTestDatabase prioritizes the test DB to be recreated next via the dirty worker. +func (pool *HashPool) RecreateTestDatabase(ctx context.Context, id int) error { + + log := pool.getPoolLogger(ctx, "RecreateTestDatabase").With().Int("id", id).Logger() + log.Debug().Msg("flag testdatabase for recreation...") + + pool.RLock() + + if id < 0 || id >= len(pool.dbs) { + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") + pool.RUnlock() + return ErrInvalidIndex + } + + pool.RUnlock() + + if err := ctx.Err(); err != nil { + // client vanished + log.Warn().Err(err).Msg("bailout client vanished!") + return err + } + + // exclude from the normal dirty channel, force recreation in a background worker... + pool.excludeIDFromChannel(pool.dirty, id) + + // directly spawn a new worker in the bg (with the same ctx as the typical workers) + // note that this runs unchained, meaning we do not care about errors that may happen via this bg task + //nolint:errcheck + go pool.recreateDatabaseGracefully(pool.workerContext, id) + + pool.unsafeTraceLogStats(log) + return nil +} + +// recreateDatabaseGracefully continuosly tries to recreate the testdatabase and will retry/block until it succeeds +func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) error { + + log := pool.getPoolLogger(ctx, "recreateDatabaseGracefully").With().Int("id", id).Logger() + log.Debug().Msg("recreating...") + + if err := ctx.Err(); err != nil { + // pool closed in the meantime. + log.Error().Err(err).Msg("bailout pre locking ctx err") + return err + } + + pool.Lock() + + if state := pool.dbs[id].state; state != dbStateDirty { + // nothing to do + log.Error().Msgf("bailout not dbStateDirty state=%v", state) + pool.Unlock() + return nil + } + + testDB := pool.dbs[id] + + // set state recreating... + pool.dbs[id].state = dbStateRecreating + pool.dbs[id] = testDB + + pool.Unlock() + + pool.recreating <- struct{}{} + + defer func() { + <-pool.recreating + }() + + try := 0 + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + try++ + + log.Trace().Int("try", try).Msg("trying to recreate...") + err := pool.recreateDB(ctx, &testDB) + if err != nil { + // only still connected errors are worthy a retry + if errors.Is(err, ErrTestDBInUse) { + + backoff := time.Duration(try) * pool.PoolConfig.TestDatabaseRetryRecreateSleepMin + if backoff > pool.PoolConfig.TestDatabaseRetryRecreateSleepMax { + backoff = pool.PoolConfig.TestDatabaseRetryRecreateSleepMax + } + + log.Warn().Int("try", try).Dur("backoff", backoff).Msg("DB is still in use, will retry...") + time.Sleep(backoff) + } else { + + log.Error().Int("try", try).Err(err).Msg("bailout worker task DB error while cleanup!") + return err + } + } else { + goto MoveToReady + } + } + } + +MoveToReady: + pool.Lock() + defer pool.Unlock() + + if ctx.Err() != nil { + // pool closed in the meantime. + return ctx.Err() + } + + if pool.dbs[id].state == dbStateReady { + // oups, it has been cleaned by another worker already + // we won't add it to the 'ready' channel to avoid duplication + log.Warn().Msg("bailout DB has be cleaned by another worker as its already ready, skipping readd to ready channel!") + return nil + } + + // increase the generation of the testdb (as we just recreated it) and move into ready! 
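The retry loop in recreateDatabaseGracefully applies a linear, capped backoff: each attempt that fails with ErrTestDBInUse sleeps a multiple of TestDatabaseRetryRecreateSleepMin, clamped to TestDatabaseRetryRecreateSleepMax. The policy in isolation (the 250 ms / 3 s values are the defaults from the config above):

package example

import "time"

// backoffFor returns the sleep before retry number try (1-based),
// growing linearly from min and capped at max.
func backoffFor(try int, min, max time.Duration) time.Duration {
	backoff := time.Duration(try) * min
	if backoff > max {
		backoff = max
	}
	return backoff
}

// With the defaults (250ms, 3s): try 1 -> 250ms, try 2 -> 500ms, ..., try 12 and later -> capped at 3s.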
+ pool.dbs[id].generation++ + pool.dbs[id].state = dbStateReady + + pool.ready <- pool.dbs[id].ID + + log.Debug().Uint("generation", pool.dbs[id].generation).Msg("ready") + pool.unsafeTraceLogStats(log) + return nil +} + +// autoCleanDirty reads 'dirty' channel and cleans up a test DB with the received index. +// When the DB is recreated according to a template, its index goes to the 'ready' channel. +// Note that we generally gurantee FIFO when it comes to auto-cleaning as long as no manual unlock/recreates happen. +func (pool *HashPool) autoCleanDirty(ctx context.Context) error { + + log := pool.getPoolLogger(ctx, "autoCleanDirty") + log.Trace().Msg("autocleaning...") + + ctx, task := trace.NewTask(ctx, "worker_clean_dirty") + defer task.End() + + var id int + select { + case id = <-pool.dirty: + case <-ctx.Done(): + return ctx.Err() + default: + // nothing to do + log.Trace().Msg("noop") + return nil + } + + // got id... + log = log.With().Int("id", id).Logger() + log.Trace().Msg("checking cleaning prerequisites...") + + regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") + pool.RLock() + regLock.End() + + if id < 0 || id >= len(pool.dbs) { + // sanity check, should never happen + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") + pool.RUnlock() + return ErrInvalidIndex + } + + blockedUntil := time.Until(pool.dbs[id].blockAutoCleanDirtyUntil) + generation := pool.dbs[id].generation + + log = log.With().Dur("blockedUntil", blockedUntil).Uint("generation", generation).Logger() + + pool.RUnlock() + + // immediately pass to pool recreate + if blockedUntil <= 0 { + log.Trace().Msg("clean now (immediate)!") + return pool.recreateDatabaseGracefully(ctx, id) + } + + // else we need to wait until we are allowed to work with it! + // we block auto-cleaning until we are allowed to... + log.Warn().Msg("sleeping before being allowed to clean...") + time.Sleep(blockedUntil) + + // we need to check that the testDB.generation did not change since we slept + // (which would indicate that the database was already unlocked/recreated by someone else in the meantime) + pool.RLock() + + if pool.dbs[id].generation != generation || pool.dbs[id].state != dbStateDirty { + log.Error().Msgf("bailout old generation=%v vs new generation=%v state=%v", generation, pool.dbs[id].generation, pool.dbs[id].state) + pool.RUnlock() + return nil + } + + pool.RUnlock() + + log.Trace().Msg("clean now (after sleep has happenend)!") + return pool.recreateDatabaseGracefully(ctx, id) +} + +func ignoreErrs(f func(ctx context.Context) error, errs ...error) func(context.Context) error { + return func(ctx context.Context) error { + err := f(ctx) + for _, e := range errs { + if errors.Is(err, e) { + return nil + } + } + return err + } +} + +func (pool *HashPool) extend(ctx context.Context) error { + + log := pool.getPoolLogger(ctx, "extend") + log.Trace().Msg("extending...") + + ctx, task := trace.NewTask(ctx, "worker_extend") + defer task.End() + + reg := trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") + pool.Lock() + reg.End() + + // get index of a next test DB - its ID + index := len(pool.dbs) + if index == cap(pool.dbs) { + log.Error().Int("dbs", len(pool.dbs)).Int("cap", cap(pool.dbs)).Err(ErrPoolFull).Msg("pool is full") + pool.Unlock() + return ErrPoolFull + } + + // initalization of a new DB using template config, it must start in state dirty! 
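autoCleanDirty sleeps through the blockAutoCleanDirtyUntil grace period and then re-reads the per-database generation; if it changed while the worker slept, another worker already unlocked or recreated the database and this one must bail out. A minimal sketch of that stale-worker guard, with the locking and fields simplified:

package example

import (
	"sync"
	"time"
)

type guarded struct {
	mu         sync.RWMutex
	generation uint
	dirty      bool
}

// cleanAfter waits out the grace period, then only proceeds if nothing
// touched the entry in the meantime (same generation, still dirty).
func (g *guarded) cleanAfter(wait time.Duration, clean func()) {
	g.mu.RLock()
	gen := g.generation
	g.mu.RUnlock()

	time.Sleep(wait)

	g.mu.RLock()
	stale := g.generation != gen || !g.dirty
	g.mu.RUnlock()
	if stale {
		return // already handled by another worker
	}
	clean()
}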
+ newTestDB := existingDB{ + state: dbStateDirty, + TestDatabase: db.TestDatabase{ + Database: db.Database{ + TemplateHash: pool.templateDB.TemplateHash, + Config: pool.templateDB.Config, + }, + ID: index, + }, + } + // set DB name + newTestDB.Database.Config.Database = makeDBName(pool.TestDBNamePrefix, pool.templateDB.TemplateHash, index) + + // add new test DB to the pool (currently it's dirty!) + pool.dbs = append(pool.dbs, newTestDB) + + log.Trace().Int("id", index).Msg("appended as dirty, recreating...") + pool.unsafeTraceLogStats(log) + pool.Unlock() + + // forced recreate... + return pool.recreateDatabaseGracefully(ctx, index) +} + +func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { + + log := pool.getPoolLogger(ctx, "RemoveAll") + + // stop all workers + pool.Stop() + + // wait until all current "recreating" tasks are finished... + + pool.Lock() + defer pool.Unlock() + + if len(pool.dbs) == 0 { + log.Error().Msg("bailout no dbs.") + return nil + } + + // remove from back to be able to repeat operation in case of error + for id := len(pool.dbs) - 1; id >= 0; id-- { + testDB := pool.dbs[id].TestDatabase + + if err := removeFunc(ctx, testDB); err != nil { + log.Error().Int("id", id).Err(err).Msg("removeFunc testdatabase err") + return err + } + + if len(pool.dbs) > 1 { + pool.dbs = pool.dbs[:len(pool.dbs)-1] + } + + pool.excludeIDFromChannel(pool.dirty, id) + pool.excludeIDFromChannel(pool.ready, id) + log.Debug().Int("id", id).Msg("testdatabase removed!") + } + + // close all only if removal of all succeeded + pool.dbs = nil + close(pool.tasksChan) + + pool.unsafeTraceLogStats(log) + + return nil +} + +func (pool *HashPool) getPoolLogger(ctx context.Context, poolFunction string) zerolog.Logger { + return util.LogFromContext(ctx).With().Str("poolHash", pool.templateDB.TemplateHash).Str("poolFn", poolFunction).Logger() +} + +// unsafeTraceLogStats logs stats of this pool. Attention: pool should be read or write locked! +func (pool *HashPool) unsafeTraceLogStats(log zerolog.Logger) { + log.Trace().Int("ready", len(pool.ready)).Int("dirty", len(pool.dirty)).Int("recreating", len(pool.recreating)).Int("tasksChan", len(pool.tasksChan)).Int("dbs", len(pool.dbs)).Int("initial", pool.PoolConfig.InitialPoolSize).Int("max", pool.PoolConfig.MaxPoolSize).Msg("pool stats") +} diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go new file mode 100644 index 0000000..82db735 --- /dev/null +++ b/pkg/pool/pool_collection.go @@ -0,0 +1,223 @@ +package pool + +import ( + "context" + "errors" + "fmt" + "runtime/trace" + "sync" + "time" + + "github.com/allaboutapps/integresql/pkg/db" +) + +var ErrUnknownHash = errors.New("no database pool exists for this hash") + +// we explicitly want to access this struct via pool.PoolConfig, thus we disable revive for the next line +type PoolConfig struct { //nolint:revive + InitialPoolSize int // Initial number of ready DBs prepared in background + MaxPoolSize int // Maximal pool size that won't be exceeded + TestDBNamePrefix string // Test-Database prefix: DatabasePrefix_TestDBNamePrefix_HASH_ID + MaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. + TestDatabaseRetryRecreateSleepMin time.Duration // Minimal time to wait after a test db recreate has failed (e.g. as client is still connected). Subsequent retries multiply this values until... + TestDatabaseRetryRecreateSleepMax time.Duration // ... the maximum possible sleep time between retries (e.g. 
3 seconds) is reached. + TestDatabaseMinimalLifetime time.Duration // After a testdatabase transitions from ready to dirty, always block auto-recreation for this duration (except manual recreate). + + disableWorkerAutostart bool // test only private flag for starting without background worker task system +} + +// we explicitly want to access this struct via pool.PoolCollection, thus we disable revive for the next line +type PoolCollection struct { //nolint:revive + PoolConfig + + pools map[string]*HashPool // map[hash] + mutex sync.RWMutex +} + +// enableDBRecreate set to false will allow reusing test databases that are marked as 'dirty'. +// Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. +func NewPoolCollection(cfg PoolConfig) *PoolCollection { + return &PoolCollection{ + pools: make(map[string]*HashPool), + PoolConfig: cfg, + } +} + +// RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. +type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error + +// RemoveDBFunc callback executed to remove a database +type RemoveDBFunc func(ctx context.Context, testDB db.TestDatabase) error + +func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { + return func(ctx context.Context, testDBWrapper *existingDB) error { + return userRecreateFunc(ctx, testDBWrapper.TestDatabase, templateName) + } +} + +type recreateTestDBFunc func(context.Context, *existingDB) error + +// InitHashPool creates a new pool with a given template hash and starts the cleanup workers. +func (p *PoolCollection) InitHashPool(_ context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { + p.mutex.Lock() + defer p.mutex.Unlock() + + cfg := p.PoolConfig + + // Create a new HashPool + pool := NewHashPool(cfg, templateDB, initDBFunc) + + if !cfg.disableWorkerAutostart { + pool.Start() + } + + // pool is ready + p.pools[pool.templateDB.TemplateHash] = pool +} + +// Start is used to start all background workers +func (p *PoolCollection) Start() { + p.mutex.RLock() + defer p.mutex.RUnlock() + + for _, pool := range p.pools { + pool.Start() + } +} + +// Stop is used to stop all background workers +func (p *PoolCollection) Stop() { + p.mutex.RLock() + defer p.mutex.RUnlock() + + for _, pool := range p.pools { + pool.Stop() + } +} + +// GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. +// If there is no DB ready and time elapses, ErrTimeout is returned. +// Otherwise, the obtained test DB is marked as 'dirty' and can be reused only if returned to the pool. +func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { + + pool, err := p.getPool(ctx, hash) + if err != nil { + return db, err + } + + return pool.GetTestDatabase(ctx, timeout) +} + +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). +func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + pool, err := p.getPool(ctx, hash) + if err != nil { + return err + } + + return pool.ReturnTestDatabase(ctx, id) +} + +// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. 
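makeActualRecreateTestDBFunc above is a small closure adapter: it captures the template name once so workers can use the narrower internal signature while callers supply the public RecreateDBFunc. The shape of the adapter reduced to its essentials, with illustrative type names:

package example

import "context"

// publicFunc is what callers provide (cf. pool.RecreateDBFunc).
type publicFunc func(ctx context.Context, dbName, templateName string) error

// internalFunc is what the workers invoke per database.
type internalFunc func(ctx context.Context, dbName string) error

// bindTemplate fixes templateName so workers don't need to know it.
func bindTemplate(templateName string, f publicFunc) internalFunc {
	return func(ctx context.Context, dbName string) error {
		return f(ctx, dbName, templateName)
	}
}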
+func (p *PoolCollection) RecreateTestDatabase(ctx context.Context, hash string, id int) error { + pool, err := p.getPool(ctx, hash) + if err != nil { + return err + } + + return pool.RecreateTestDatabase(ctx, id) +} + +// RemoveAllWithHash removes a pool with a given template hash. +// All background workers belonging to this pool are stopped. +func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc RemoveDBFunc) error { + pool, collUnlock, err := p.getPoolLockCollection(ctx, hash) + defer collUnlock() + + if err != nil { + return err + } + + if err := pool.RemoveAll(ctx, removeFunc); err != nil { + return err + } + + // all DBs have been removed, now remove the pool itself + delete(p.pools, hash) + + return nil +} + +// RemoveAll removes all tracked pools. +func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { + p.mutex.Lock() + defer p.mutex.Unlock() + + for hash, pool := range p.pools { + if err := pool.RemoveAll(ctx, removeFunc); err != nil { + return err + } + + delete(p.pools, hash) + } + + return nil +} + +// MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. +func (p *PoolCollection) MakeDBName(hash string, id int) string { + p.mutex.RLock() + defer p.mutex.RUnlock() + + return makeDBName(p.PoolConfig.TestDBNamePrefix, hash, id) +} + +func makeDBName(testDBPrefix string, hash string, id int) string { + // db name has an ID in suffix + return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) +} + +func (p *PoolCollection) getPool(ctx context.Context, hash string) (pool *HashPool, err error) { + reg := trace.StartRegion(ctx, "wait_for_rlock_main_pool") + p.mutex.RLock() + defer p.mutex.RUnlock() + reg.End() + + pool, ok := p.pools[hash] + if !ok { + // no such pool + return nil, ErrUnknownHash + } + + return pool, nil +} + +func (p *PoolCollection) getPoolLockCollection(ctx context.Context, hash string) (pool *HashPool, unlock func(), err error) { + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + unlock = func() { p.mutex.Unlock() } + reg.End() + + pool, ok := p.pools[hash] + if !ok { + // no such pool + err = ErrUnknownHash + } + + return pool, unlock, err +} + +// extend is only used for internal testing! +// it adds a new test DB to the pool and creates it according to the template. +// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. +// If the pool size has already reached MAX, ErrPoolFull is returned. +func (p *PoolCollection) extend(ctx context.Context, templateDB db.Database) error { + hash := templateDB.TemplateHash + + pool, err := p.getPool(ctx, hash) + if err != nil { + return err + } + + return pool.extend(ctx) +} diff --git a/pkg/pool/pool_collection_internal_test.go b/pkg/pool/pool_collection_internal_test.go new file mode 100644 index 0000000..a843d0c --- /dev/null +++ b/pkg/pool/pool_collection_internal_test.go @@ -0,0 +1,347 @@ +package pool + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPoolAddGet(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cfg := PoolConfig{ + MaxPoolSize: 2, + MaxParallelTasks: 4, + TestDBNamePrefix: "prefix_", + disableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! 
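makeDBName above fixes the naming scheme: configured prefix, template hash, and a zero-padded three-digit ID. The function with an example value matching the assertion in TestPoolAddGet:

package example

import "fmt"

func makeDBName(testDBPrefix string, hash string, id int) string {
	// db name has the ID as suffix, zero-padded to three digits
	return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id)
}

// makeDBName("prefix_", "h1", 0) == "prefix_h1_000"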
+ } + p := NewPoolCollection(cfg) + + hash1 := "h1" + hash2 := "h2" + templateDB := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Username: "ich", + Database: "templateDBname", + }, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + p.InitHashPool(ctx, templateDB, initFunc) + + t.Cleanup(func() { p.Stop() }) + + // get from empty (just initialized) + _, err := p.GetTestDatabase(ctx, hash1, 0) + assert.Error(t, err, ErrTimeout) + + // add a new one + assert.NoError(t, p.extend(ctx, templateDB)) + // get it + testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) + assert.NoError(t, err) + assert.Equal(t, "prefix_h1_000", testDB.Database.Config.Database) + assert.Equal(t, "ich", testDB.Database.Config.Username) + + // add for h2 + templateDB2 := templateDB + templateDB2.TemplateHash = hash2 + p.InitHashPool(ctx, templateDB2, initFunc) + assert.NoError(t, p.extend(ctx, templateDB2)) + assert.NoError(t, p.extend(ctx, templateDB2)) + assert.ErrorIs(t, p.extend(ctx, templateDB2), ErrPoolFull) + + // get from empty h1 + _, err = p.GetTestDatabase(ctx, hash1, 100*time.Millisecond) + assert.ErrorIs(t, err, ErrTimeout) + + // get from h2 + testDB1, err := p.GetTestDatabase(ctx, hash2, 1*time.Second) + assert.NoError(t, err) + assert.Equal(t, hash2, testDB1.TemplateHash) + testDB2, err := p.GetTestDatabase(ctx, hash2, 1*time.Second) + assert.NoError(t, err) + assert.Equal(t, hash2, testDB2.TemplateHash) + assert.NotEqual(t, testDB1.ID, testDB2.ID) +} + +func TestPoolAddGetConcurrent(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + + maxPoolSize := 15 + cfg := PoolConfig{ + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + MaxParallelTasks: 4, + TestDBNamePrefix: "", + } + p := NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) + + var wg sync.WaitGroup + sleepDuration := 10 * time.Millisecond + + // initialize hash pool + // initial test databases will be added automatically + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) + + // try to get them from another goroutines in parallel + getDB := func(hash string) { + defer wg.Done() + + sleepDuration := sleepDuration + + db, err := p.GetTestDatabase(ctx, hash, time.Duration(cfg.MaxPoolSize)*sleepDuration) + assert.NoError(t, err) + assert.Equal(t, hash, db.TemplateHash) + t.Logf("got %s %v\n", db.TemplateHash, db.ID) + } + + for i := 0; i < cfg.MaxPoolSize; i++ { + wg.Add(2) + go getDB(hash1) + go getDB(hash2) + } + + wg.Wait() + +} + +func TestPoolAddGetReturnConcurrent(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + return nil + } + + cfg := PoolConfig{ + MaxPoolSize: 40, + MaxParallelTasks: 4, + TestDBNamePrefix: "", + } + p := NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) + + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) + + var wg sync.WaitGroup + + // add 
DBs sequentially + for i := 0; i < cfg.MaxPoolSize/4; i++ { + assert.NoError(t, p.extend(ctx, templateDB1)) + assert.NoError(t, p.extend(ctx, templateDB2)) + } + + // stop the workers to prevent auto cleaning in background + p.Stop() + + // try to get them from another goroutines in parallel + getAndReturnDB := func(hash string) { + defer wg.Done() + + db, err := p.GetTestDatabase(ctx, hash, 3*time.Second) + assert.NoError(t, err) + assert.Equal(t, hash, db.TemplateHash) + t.Logf("returning %s %v\n", db.TemplateHash, db.ID) + assert.NoError(t, p.ReturnTestDatabase(ctx, hash, db.ID)) + } + + for i := 0; i < cfg.MaxPoolSize; i++ { + wg.Add(2) + go getAndReturnDB(hash1) + go getAndReturnDB(hash2) + } + + wg.Wait() +} + +func TestPoolRemoveAll(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + removeFunc := func(ctx context.Context, testDB db.TestDatabase) error { + t.Log("remove ", testDB.Database) + return nil + } + + cfg := PoolConfig{ + MaxPoolSize: 6, + MaxParallelTasks: 4, + } + p := NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) + + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) + + // add DBs sequentially + for i := 0; i < cfg.MaxPoolSize; i++ { + assert.NoError(t, p.extend(ctx, templateDB1)) + assert.NoError(t, p.extend(ctx, templateDB2)) + } + + // remove all + assert.NoError(t, p.RemoveAll(ctx, removeFunc)) + + // try to get + _, err := p.GetTestDatabase(ctx, hash1, 0) + assert.Error(t, err, ErrTimeout) + _, err = p.GetTestDatabase(ctx, hash2, 0) + assert.Error(t, err, ErrTimeout) + + // start using pool again + p.InitHashPool(ctx, templateDB1, initFunc) + assert.NoError(t, p.extend(ctx, templateDB1)) + testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) + assert.NoError(t, err) + assert.Equal(t, 0, testDB.ID) +} + +func TestPoolReuseDirty(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Database: "h1_template", + }, + } + + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database.Config.Database) + return nil + } + + maxPoolSize := 40 + cfg := PoolConfig{ + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + MaxParallelTasks: 1, + TestDBNamePrefix: "test_", + } + p := NewPoolCollection(cfg) + + p.InitHashPool(ctx, templateDB1, initFunc) + t.Cleanup(func() { p.Stop() }) + + getDirty := func(seenIDMap *sync.Map) { + newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 3*time.Second) + assert.NoError(t, err) + seenIDMap.Store(newTestDB1.ID, true) + } + + // allow for recycling inUse test DBs + var wg sync.WaitGroup + seenIDMap := sync.Map{} + for i := 0; i < 3*cfg.MaxPoolSize; i++ { + wg.Add(1) + go func() { + defer wg.Done() + getDirty(&seenIDMap) + }() + } + + wg.Wait() + + for id := 0; id < cfg.MaxPoolSize; id++ { + _, ok := seenIDMap.Load(id) + // every index should show up at least once + assert.True(t, ok, id) + } +} + +func TestPoolReturnTestDatabase(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + 
+ Database: "h1_template",
+ },
+ }
+
+ recreateTimesMap := sync.Map{}
+ initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error {
+ times, existing := recreateTimesMap.LoadOrStore(testDB.ID, 1)
+ if existing {
+ recreateTimesMap.Store(testDB.ID, times.(int)+1)
+ }
+
+ return nil
+ }
+
+ cfg := PoolConfig{
+ MaxPoolSize: 10,
+ MaxParallelTasks: 3,
+ disableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically!
+ }
+ p := NewPoolCollection(cfg)
+
+ p.InitHashPool(ctx, templateDB1, initFunc)
+ // add just one test DB
+ require.NoError(t, p.extend(ctx, templateDB1))
+
+ testDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond)
+ assert.NoError(t, err)
+
+ // assert that workers are stopped and no new DB showed up
+ _, err = p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond)
+ assert.ErrorIs(t, err, ErrTimeout)
+
+ // return and get the same one
+ assert.NoError(t, p.ReturnTestDatabase(ctx, hash1, testDB1.ID))
+ testDB2, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond)
+ assert.NoError(t, err)
+ assert.Equal(t, testDB1.ID, testDB2.ID)
+}
diff --git a/pkg/templates/template.go b/pkg/templates/template.go
new file mode 100644
index 0000000..79a3e97
--- /dev/null
+++ b/pkg/templates/template.go
@@ -0,0 +1,127 @@
+package templates
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/allaboutapps/integresql/pkg/db"
+ "github.com/allaboutapps/integresql/pkg/util"
+)
+
+type TemplateState int32
+
+const (
+ TemplateStateInit TemplateState = iota
+ TemplateStateDiscarded
+ TemplateStateFinalized
+)
+
+type Template struct {
+ TemplateConfig
+ db.Database
+ state TemplateState
+
+ cond *sync.Cond
+ mutex sync.RWMutex
+}
+
+type TemplateConfig struct {
+ db.DatabaseConfig
+}
+
+func NewTemplate(hash string, config TemplateConfig) *Template {
+ t := &Template{
+ TemplateConfig: config,
+ Database: db.Database{TemplateHash: hash, Config: config.DatabaseConfig},
+ state: TemplateStateInit,
+ }
+ t.cond = sync.NewCond(&t.mutex)
+
+ return t
+}
+
+func (t *Template) GetConfig(_ context.Context) TemplateConfig {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+
+ return t.TemplateConfig
+}
+
+// GetState locks the template and checks its state.
+func (t *Template) GetState(_ context.Context) TemplateState {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+
+ return t.state
+}
+
+// SetState sets the desired state and broadcasts the change to all goroutines waiting for it.
+func (t *Template) SetState(ctx context.Context, newState TemplateState) {
+ if t.GetState(ctx) == newState {
+ return
+ }
+
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.state = newState
+
+ t.cond.Broadcast()
+}
+
+// WaitUntilFinalized checks the current template state and returns directly if it's 'Finalized'.
+// If it's not, the function waits up to the given timeout for the template state to change.
+// On timeout the old state is returned; otherwise the new state is.
+func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration) (exitState TemplateState) {
+ currentState := t.GetState(ctx)
+ if currentState == TemplateStateFinalized {
+ return currentState
+ }
+
+ newState, err := util.WaitWithTimeout(ctx, timeout, func(context.Context) (TemplateState, error) {
+ t.cond.L.Lock()
+ defer t.cond.L.Unlock()
+ t.cond.Wait()
+
+ return t.state, nil
+ })
+
+ if err != nil {
+ return currentState
+ }
+ return newState
+}
+
+// GetStateWithLock gets the current state leaving the template locked.
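+// This allows the caller to atomically inspect the state and transition it while holding
+// the lock, e.g. (illustrative sketch only):
+//
+// state, lockedTmpl := t.GetStateWithLock(ctx)
+// if state == TemplateStateInit {
+// lockedTmpl.SetState(ctx, TemplateStateFinalized)
+// }
+// lockedTmpl.Unlock()
+//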
+// REMEMBER to unlock it when you no longer need it locked.
+func (t *Template) GetStateWithLock(_ context.Context) (TemplateState, LockedTemplate) {
+ t.mutex.Lock()
+
+ return t.state, LockedTemplate{t: t}
+}
+
+type LockedTemplate struct {
+ t *Template
+}
+
+// Unlock releases the locked template. Calling Unlock more than once is a safe no-op.
+func (l *LockedTemplate) Unlock() {
+ if l.t != nil {
+ l.t.mutex.Unlock()
+ l.t = nil
+ }
+}
+
+// SetState sets a new state of the locked template (without acquiring the lock again).
+func (l LockedTemplate) SetState(_ context.Context, newState TemplateState) {
+ if l.t.state == newState {
+ return
+ }
+
+ l.t.state = newState
+ l.t.cond.Broadcast()
+}
+
+func (c TemplateConfig) Equals(other TemplateConfig) bool {
+ return c.DatabaseConfig.ConnectionString() == other.ConnectionString()
+}
diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go
new file mode 100644
index 0000000..4769a91
--- /dev/null
+++ b/pkg/templates/template_collection.go
@@ -0,0 +1,101 @@
+package templates
+
+import (
+ "context"
+ "runtime/trace"
+ "sync"
+)
+
+type Collection struct {
+ templates map[string]*Template
+ collMutex sync.RWMutex
+}
+
+// Unlock is a function that releases the collection lock.
+type Unlock func()
+
+func NewCollection() *Collection {
+ return &Collection{
+ templates: make(map[string]*Template),
+ collMutex: sync.RWMutex{},
+ }
+}
+
+// Push tries to add a new template to the collection.
+// If the template already exists and the config matches, added=false is returned.
+// If the config doesn't match, the template is overwritten and added=true is returned.
+// This function locks the collection; regardless of the result, the returned unlock
+// function must be called to release the lock.
+func (tc *Collection) Push(ctx context.Context, hash string, config TemplateConfig) (added bool, unlock Unlock) {
+ reg := trace.StartRegion(ctx, "get_template_lock")
+ tc.collMutex.Lock()
+
+ unlock = func() {
+ tc.collMutex.Unlock()
+ reg.End()
+ }
+
+ template, ok := tc.templates[hash]
+ if ok {
+ // check if the settings match
+ if template.GetConfig(ctx).Equals(config) {
+ return false, unlock
+ }
+ // else overwrite the template
+ }
+
+ tc.templates[hash] = NewTemplate(hash, config)
+ return true, unlock
+}
+
+// Pop removes a template from the collection, returning it to the caller.
+func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, found bool) {
+ reg := trace.StartRegion(ctx, "get_template_lock")
+ defer reg.End()
+ tc.collMutex.Lock()
+ defer tc.collMutex.Unlock()
+
+ template, ok := tc.templates[hash]
+ if !ok {
+ return nil, false
+ }
+
+ delete(tc.templates, hash)
+ return template, true
+}
+
+// Get gets the requested template without removing it from the collection.
+func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, found bool) {
+ reg := trace.StartRegion(ctx, "get_template_lock")
+ defer reg.End()
+
+ tc.collMutex.RLock()
+ defer tc.collMutex.RUnlock()
+
+ template, ok := tc.templates[hash]
+ if !ok {
+ return nil, false
+ }
+
+ return template, true
+}
+
+// RemoveUnsafe removes the template and can be called ONLY IF THE COLLECTION IS LOCKED.
+func (tc *Collection) RemoveUnsafe(_ context.Context, hash string) {
+ delete(tc.templates, hash)
+}
+
+// RemoveAll removes all templates from the collection.
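+// Each template is switched to TemplateStateDiscarded before being deleted, so that any
+// goroutine currently blocked in WaitUntilFinalized is woken up and observes the discarded state.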
+func (tc *Collection) RemoveAll(ctx context.Context) {
+ reg := trace.StartRegion(ctx, "get_template_lock")
+ defer reg.End()
+
+ tc.collMutex.Lock()
+ defer tc.collMutex.Unlock()
+
+ for hash, template := range tc.templates {
+ template.SetState(ctx, TemplateStateDiscarded)
+
+ delete(tc.templates, hash)
+ }
+}
diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go
new file mode 100644
index 0000000..c69fde2
--- /dev/null
+++ b/pkg/templates/template_collection_test.go
@@ -0,0 +1,94 @@
+package templates_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/allaboutapps/integresql/pkg/db"
+ "github.com/allaboutapps/integresql/pkg/templates"
+ "github.com/allaboutapps/integresql/pkg/util"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTemplateCollection(t *testing.T) {
+ ctx := context.Background()
+
+ coll := templates.NewCollection()
+ cfg := templates.TemplateConfig{
+ DatabaseConfig: db.DatabaseConfig{
+ Username: "ich",
+ Database: "template_test",
+ },
+ }
+ hash := "123"
+
+ added, unlock := coll.Push(ctx, hash, cfg)
+ assert.True(t, added)
+ unlock()
+
+ template1, found := coll.Get(ctx, hash)
+ assert.True(t, found)
+
+ // get with lock
+ state, lockedTemplate := template1.GetStateWithLock(ctx)
+ assert.Equal(t, templates.TemplateStateInit, state)
+
+ // try to get again while the template is locked
+ template2, found := coll.Get(ctx, hash)
+ assert.True(t, found)
+
+ // assert that getting the state now won't succeed - the template is locked
+ _, err := util.WaitWithTimeout(ctx, 100*time.Millisecond, func(ctx context.Context) (templates.TemplateState, error) {
+ return template1.GetState(ctx), nil
+ })
+ assert.ErrorIs(t, err, util.ErrTimeout)
+ _, err = util.WaitWithTimeout(ctx, 100*time.Millisecond, func(ctx context.Context) (templates.TemplateState, error) {
+ return template2.GetState(ctx), nil
+ })
+ assert.ErrorIs(t, err, util.ErrTimeout)
+
+ // now set the new state and unlock the locked template
+ lockedTemplate.SetState(ctx, templates.TemplateStateDiscarded)
+ lockedTemplate.Unlock()
+ // the second Unlock is a no-op by design
+ lockedTemplate.Unlock()
+
+ assert.Equal(t, templates.TemplateStateDiscarded, template2.GetState(ctx))
+
+ // make sure that the template is still in the collection
+ template3, found := coll.Get(ctx, hash)
+ assert.True(t, found)
+ assert.Equal(t, "ich", template3.Config.Username)
+}
+
+func TestTemplateCollectionPushWithOtherConfig(t *testing.T) {
+ ctx := context.Background()
+
+ coll := templates.NewCollection()
+ cfg := templates.TemplateConfig{
+ DatabaseConfig: db.DatabaseConfig{
+ Username: "ich",
+ Database: "template_test",
+ },
+ }
+ hash := "123"
+
+ added, unlock := coll.Push(ctx, hash, cfg)
+ assert.True(t, added)
+ unlock()
+
+ added, unlock = coll.Push(ctx, hash, cfg)
+ assert.False(t, added)
+ unlock()
+
+ cfg.Database = "template_another"
+ added, unlock = coll.Push(ctx, hash, cfg)
+ assert.True(t, added)
+ unlock()
+
+ // fetch the template again and verify that it was overwritten with the new config
+ template, found := coll.Get(ctx, hash)
+ assert.True(t, found)
+ assert.Equal(t, "template_another", template.Config.Database)
+}
diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go
new file mode 100644
index 0000000..128c5dd
--- /dev/null
+++ b/pkg/templates/template_test.go
@@ -0,0 +1,81 @@
+package templates_test
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/allaboutapps/integresql/pkg/templates"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTemplateGetSetState(t *testing.T) {
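+ // exercises the plain state transitions: Init -> Finalized -> Discarded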
+ ctx := context.Background()
+
+ t1 := templates.NewTemplate("123", templates.TemplateConfig{})
+ state := t1.GetState(ctx)
+ assert.Equal(t, templates.TemplateStateInit, state)
+
+ t1.SetState(ctx, templates.TemplateStateFinalized)
+ state = t1.GetState(ctx)
+ assert.Equal(t, templates.TemplateStateFinalized, state)
+
+ t1.SetState(ctx, templates.TemplateStateDiscarded)
+ state = t1.GetState(ctx)
+ assert.Equal(t, templates.TemplateStateDiscarded, state)
+}
+
+func TestForReady(t *testing.T) {
+ ctx := context.Background()
+ goroutineNum := 10
+
+ // initialize a new template, not ready yet
+ t1 := templates.NewTemplate("123", templates.TemplateConfig{})
+ state := t1.GetState(ctx)
+ assert.Equal(t, templates.TemplateStateInit, state)
+
+ var wg sync.WaitGroup
+ errsChan := make(chan error, 2*goroutineNum)
+
+ // these goroutines wait long enough and should observe the finalized state
+ for i := 0; i < goroutineNum; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ timeout := 1 * time.Second
+ state := t1.WaitUntilFinalized(ctx, timeout)
+ if state != templates.TemplateStateFinalized {
+ errsChan <- fmt.Errorf("expected state %v (finalized), but is %v", templates.TemplateStateFinalized, state)
+ }
+ }()
+ }
+
+ // these goroutines should run into the timeout
+ for i := 0; i < goroutineNum; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ timeout := 30 * time.Millisecond
+ state := t1.WaitUntilFinalized(ctx, timeout)
+ if state != templates.TemplateStateInit {
+ errsChan <- fmt.Errorf("expected state %v (init), but is %v", templates.TemplateStateInit, state)
+ }
+ }()
+ }
+
+ // now set the state
+ time.Sleep(50 * time.Millisecond)
+ t1.SetState(ctx, templates.TemplateStateFinalized)
+
+ wg.Wait()
+ close(errsChan)
+
+ if len(errsChan) > 0 {
+ for err := range errsChan {
+ t.Error(err)
+ }
+ t.Fail()
+ }
+}
diff --git a/pkg/util/context.go b/pkg/util/context.go
new file mode 100644
index 0000000..00ce454
--- /dev/null
+++ b/pkg/util/context.go
@@ -0,0 +1,58 @@
+package util
+
+import (
+ "context"
+ "errors"
+)
+
+type contextKey string
+
+const (
+ CTXKeyUser contextKey = "user"
+ CTXKeyAccessToken contextKey = "access_token"
+ CTXKeyRequestID contextKey = "request_id"
+ CTXKeyDisableLogger contextKey = "disable_logger"
+ CTXKeyCacheControl contextKey = "cache_control"
+)
+
+// RequestIDFromContext returns the ID of the (HTTP) request, returning an error if it is not present.
+func RequestIDFromContext(ctx context.Context) (string, error) {
+ val := ctx.Value(CTXKeyRequestID)
+ if val == nil {
+ return "", errors.New("no request ID present in context")
+ }
+
+ id, ok := val.(string)
+ if !ok {
+ return "", errors.New("request ID in context is not a string")
+ }
+
+ return id, nil
+}
+
+// ShouldDisableLogger checks whether the logger instance should be disabled for the provided context.
+// `util.LogFromContext` will use this function to check whether it should return a default logger if
+// none has been set by our logging middleware before, or fall back to the disabled logger, suppressing
+// all output. Use `ctx = util.DisableLogger(ctx, true)` to disable logging for the given context.
+func ShouldDisableLogger(ctx context.Context) bool {
+ s := ctx.Value(CTXKeyDisableLogger)
+ if s == nil {
+ return false
+ }
+
+ shouldDisable, ok := s.(bool)
+ if !ok {
+ return false
+ }
+
+ return shouldDisable
+}
+
+// DisableLogger toggles the indication whether `util.LogFromContext` should return a disabled logger
+// for a context if none has been set by our logging middleware before. While the use cases for a disabled
+// logger are relatively minimal (we almost always want to have some log output, even if the context
+// was not directly derived from an HTTP request), this functionality was provided so you can switch back
+// to the old zerolog behavior if so desired.
+func DisableLogger(ctx context.Context, shouldDisable bool) context.Context {
+ return context.WithValue(ctx, CTXKeyDisableLogger, shouldDisable)
+}
diff --git a/pkg/util/log.go b/pkg/util/log.go
new file mode 100644
index 0000000..c1f4ceb
--- /dev/null
+++ b/pkg/util/log.go
@@ -0,0 +1,42 @@
+package util
+
+import (
+ "context"
+
+ "github.com/labstack/echo/v4"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+)
+
+// LogFromContext returns a request-specific zerolog instance using the provided context.
+// The returned logger will have the request ID as well as some other values predefined.
+// If no logger is associated with the context provided, the global zerolog instance
+// will be returned instead - this function will _always_ return a valid (enabled) logger.
+// Should you ever need to force a disabled logger for a context, use `util.DisableLogger(ctx, true)`
+// and pass the returned context to other code/`LogFromContext`.
+func LogFromContext(ctx context.Context) *zerolog.Logger {
+ l := log.Ctx(ctx)
+ if l.GetLevel() == zerolog.Disabled {
+ if ShouldDisableLogger(ctx) {
+ return l
+ }
+ l = &log.Logger
+ }
+ return l
+}
+
+// LogFromEchoContext returns a request-specific zerolog instance using the echo.Context of the request.
+// The returned logger will have the request ID as well as some other values predefined.
+func LogFromEchoContext(c echo.Context) *zerolog.Logger {
+ return LogFromContext(c.Request().Context())
+}
+
+// LogLevelFromString parses the given level name, defaulting to debug if it cannot be parsed.
+func LogLevelFromString(s string) zerolog.Level {
+ l, err := zerolog.ParseLevel(s)
+ if err != nil {
+ log.Error().Err(err).Msgf("Failed to parse log level, defaulting to %s", zerolog.DebugLevel)
+ return zerolog.DebugLevel
+ }
+
+ return l
+}
diff --git a/pkg/util/log_test.go b/pkg/util/log_test.go
new file mode 100644
index 0000000..c318b74
--- /dev/null
+++ b/pkg/util/log_test.go
@@ -0,0 +1,20 @@
+package util_test
+
+import (
+ "testing"
+
+ "github.com/allaboutapps/integresql/pkg/util"
+ "github.com/rs/zerolog"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLogLevelFromString(t *testing.T) {
+ res := util.LogLevelFromString("panic")
+ assert.Equal(t, zerolog.PanicLevel, res)
+
+ res = util.LogLevelFromString("warn")
+ assert.Equal(t, zerolog.WarnLevel, res)
+
+ res = util.LogLevelFromString("foo")
+ assert.Equal(t, zerolog.DebugLevel, res)
+}
diff --git a/pkg/util/retry.go b/pkg/util/retry.go
index 1f629c7..ac5fb18 100644
--- a/pkg/util/retry.go
+++ b/pkg/util/retry.go
@@ -17,5 +17,5 @@ func Retry(attempts int, sleep time.Duration, f func() error) error {
 time.Sleep(sleep)
 }
 
- return fmt.Errorf("failing after %d attempts, lat error: %v", attempts, err)
+ return fmt.Errorf("failing after %d attempts, last error: %w", attempts, err)
 }
diff --git a/pkg/util/wait.go b/pkg/util/wait.go
new file mode 100644
index 0000000..bd05a23
--- /dev/null
+++ b/pkg/util/wait.go
@@ -0,0 +1,56 @@
+package util
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+)
+
+var ErrTimeout = errors.New("timeout while waiting for operation to complete")
+
+// WaitWithTimeout waits for the operation to complete or returns ErrTimeout.
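+// A minimal usage sketch (illustrative only; fetchValue stands in for any slow operation):
+//
+// res, err := WaitWithTimeout(ctx, time.Second, func(ctx context.Context) (int, error) {
+// return fetchValue(ctx)
+// })
+// if errors.Is(err, ErrTimeout) {
+// // the operation did not finish within one second
+// }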
+func WaitWithTimeout[T any](ctx context.Context, timeout time.Duration, operation func(context.Context) (T, error)) (T, error) {
+ cctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ resChan := make(chan T, 1)
+ g, cctx := errgroup.WithContext(cctx)
+
+ g.Go(func() error {
+ res, err := operation(cctx)
+ resChan <- res
+ return err
+ })
+
+ select {
+ case res := <-resChan:
+ return res, g.Wait()
+ case <-time.After(timeout):
+ var empty T
+ return empty, ErrTimeout
+ }
+}
+
+// WaitWithCancellableCtx runs the operation while tracking the state of the given context.
+// If the context is cancelled, the function returns directly with ErrTimeout.
+func WaitWithCancellableCtx[T any](ctx context.Context, operation func(context.Context) (T, error)) (T, error) {
+ resChan := make(chan T, 1)
+ g, cctx := errgroup.WithContext(ctx)
+
+ g.Go(func() error {
+ res, err := operation(cctx)
+ resChan <- res
+ return err
+ })
+
+ select {
+ case res := <-resChan:
+ return res, g.Wait()
+ case <-ctx.Done():
+ var empty T
+ return empty, ErrTimeout
+ }
+}
diff --git a/pkg/util/wait_test.go b/pkg/util/wait_test.go
new file mode 100644
index 0000000..903645a
--- /dev/null
+++ b/pkg/util/wait_test.go
@@ -0,0 +1,54 @@
+package util_test
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/allaboutapps/integresql/pkg/util"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWaitWithTimeout(t *testing.T) {
+ ctx := context.Background()
+ type output struct {
+ A int
+ }
+
+ // operation runs into the timeout
+ start := time.Now()
+ res, err := util.WaitWithTimeout(ctx, time.Millisecond*100, func(ctx context.Context) (output, error) {
+ time.Sleep(time.Millisecond * 200)
+ return output{A: 1}, nil
+ })
+ elapsed := time.Since(start)
+
+ assert.ErrorIs(t, err, util.ErrTimeout)
+ assert.Empty(t, res)
+ assert.Less(t, elapsed, 150*time.Millisecond)
+
+ // operation completed
+ start = time.Now()
+ res, err = util.WaitWithTimeout(ctx, time.Millisecond*200, func(ctx context.Context) (output, error) {
+ time.Sleep(time.Millisecond * 160)
+ return output{A: 1}, nil
+ })
+ elapsed = time.Since(start)
+
+ assert.NoError(t, err)
+ assert.Equal(t, 1, res.A)
+ assert.Less(t, elapsed, 180*time.Millisecond)
+
+ // operation completed with an error
+ testErr := errors.New("test error")
+ start = time.Now()
+ res, err = util.WaitWithTimeout(ctx, time.Millisecond*100, func(ctx context.Context) (output, error) {
+ return output{}, testErr
+ })
+ elapsed = time.Since(start)
+
+ assert.ErrorIs(t, err, testErr)
+ assert.Empty(t, res)
+ assert.Less(t, elapsed, 120*time.Millisecond)
+}
diff --git a/tests/integresql_test.go b/tests/integresql_test.go
new file mode 100644
index 0000000..8fd5ddb
--- /dev/null
+++ b/tests/integresql_test.go
@@ -0,0 +1,117 @@
+// Package integresql_test provides benchmarks to test integresql performance.
+// Before running any of the tests, make sure that integresql is running.
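+// The bundled test client defaults to http://integresql:5000/api; this can be overridden
+// via the INTEGRESQL_CLIENT_BASE_URL and INTEGRESQL_CLIENT_API_VERSION env vars (see tests/testclient).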
+package integresql_test + +import ( + "context" + "database/sql" + "testing" + "time" + + "github.com/allaboutapps/integresql/tests/testclient" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { + ctx := context.Background() + client, err := testclient.DefaultClientFromEnv() + require.NoError(b, err) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + newTemplateHash := uuid.NewString() + + err := client.SetupTemplateWithDBClient(ctx, newTemplateHash, func(db *sql.DB) error { + _, err := db.ExecContext(ctx, `CREATE TABLE users ( + id int NOT NULL, + username varchar(255) NOT NULL, + created_at timestamptz NOT NULL, + CONSTRAINT users_pkey PRIMARY KEY (id));`) + require.NoError(b, err) + res, err := db.ExecContext(ctx, ` + INSERT INTO users (id, username, created_at) + VALUES + (1, 'user1', $1), + (2, 'user2', $1); + `, time.Now()) + require.NoError(b, err) + inserted, err := res.RowsAffected() + require.NoError(b, err) + require.Equal(b, int64(2), inserted) + return nil + }) + require.NoError(b, err) + + dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) + require.NoError(b, err) + db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) + require.NoError(b, err) + defer db.Close() + + require.NoError(b, db.PingContext(ctx)) + row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") + require.NoError(b, row.Err()) + var userCnt int + require.NoError(b, row.Scan(&userCnt)) + assert.Equal(b, 2, userCnt) + db.Close() + + require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) + } + }) + +} + +func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { + ctx := context.Background() + client, err := testclient.DefaultClientFromEnv() + require.NoError(b, err) + + newTemplateHash := uuid.NewString() + err = client.SetupTemplateWithDBClient(ctx, newTemplateHash, func(db *sql.DB) error { + _, err := db.ExecContext(ctx, `CREATE TABLE users ( + id int NOT NULL, + username varchar(255) NOT NULL, + created_at timestamptz NOT NULL, + CONSTRAINT users_pkey PRIMARY KEY (id));`) + require.NoError(b, err) + res, err := db.ExecContext(ctx, ` + INSERT INTO users (id, username, created_at) + VALUES + (1, 'user1', $1); + `, time.Now()) + require.NoError(b, err) + inserted, err := res.RowsAffected() + require.NoError(b, err) + require.Equal(b, int64(1), inserted) + return nil + }) + require.NoError(b, err) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + + dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) + require.NoError(b, err) + db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) + require.NoError(b, err) + defer db.Close() + + require.NoError(b, db.PingContext(ctx)) + row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") + require.NoError(b, row.Err()) + var userCnt int + require.NoError(b, row.Scan(&userCnt)) + assert.Equal(b, 1, userCnt) + // keep the DB for some time before returning + time.Sleep(time.Second) + db.Close() + + require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) + } + }) + + b.Cleanup(func() { require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) }) +} diff --git a/tests/testclient/client.go b/tests/testclient/client.go new file mode 100644 index 0000000..0b4c171 --- /dev/null +++ b/tests/testclient/client.go @@ -0,0 +1,305 @@ +// Package testclient provides a simple integresql client implementation for test purposes only. 
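+// Typical usage in a benchmark (illustrative sketch only):
+//
+// client, _ := testclient.DefaultClientFromEnv()
+// _ = client.SetupTemplateWithDBClient(ctx, hash, func(db *sql.DB) error { return nil /* seed here */ })
+// testDB, _ := client.GetTestDatabase(ctx, hash)
+//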
+// Please refer to https://github.com/allaboutapps/integresql-client-go +// for a full client implementation to be used in your application. +package testclient + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + + "github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/util" + + // Import postgres driver for database/sql package + _ "github.com/lib/pq" +) + +type ClientConfig struct { + BaseURL string + APIVersion string +} + +func DefaultClientConfigFromEnv() ClientConfig { + return ClientConfig{ + BaseURL: util.GetEnv("INTEGRESQL_CLIENT_BASE_URL", "http://integresql:5000/api"), + APIVersion: util.GetEnv("INTEGRESQL_CLIENT_API_VERSION", "v1"), + } +} + +type Client struct { + baseURL *url.URL + client *http.Client + config ClientConfig +} + +func NewClient(config ClientConfig) (*Client, error) { + c := &Client{ + baseURL: nil, + client: nil, + config: config, + } + + defaultConfig := DefaultClientConfigFromEnv() + + if len(c.config.BaseURL) == 0 { + c.config.BaseURL = defaultConfig.BaseURL + } + + if len(c.config.APIVersion) == 0 { + c.config.APIVersion = defaultConfig.APIVersion + } + + u, err := url.Parse(c.config.BaseURL) + if err != nil { + return nil, err + } + + c.baseURL = u.ResolveReference(&url.URL{Path: path.Join(u.Path, c.config.APIVersion)}) + + c.client = &http.Client{} + + return c, nil +} + +func DefaultClientFromEnv() (*Client, error) { + return NewClient(DefaultClientConfigFromEnv()) +} + +func (c *Client) ResetAllTracking(ctx context.Context) error { + req, err := c.newRequest(ctx, "DELETE", "/admin/templates", nil) + if err != nil { + return err + } + + var msg string + resp, err := c.do(req, &msg) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("failed to reset all tracking: %v", msg) + } + + return nil +} + +func (c *Client) InitializeTemplate(ctx context.Context, hash string) (TemplateDatabase, error) { + var template TemplateDatabase + + payload := map[string]string{"hash": hash} + + req, err := c.newRequest(ctx, "POST", "/templates", payload) + if err != nil { + return template, err + } + + resp, err := c.do(req, &template) + if err != nil { + return template, err + } + + switch resp.StatusCode { + case http.StatusOK: + return template, nil + case http.StatusLocked: + return template, manager.ErrTemplateAlreadyInitialized + case http.StatusServiceUnavailable: + return template, manager.ErrManagerNotReady + default: + return template, fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) SetupTemplate(ctx context.Context, hash string, init func(conn string) error) error { + template, err := c.InitializeTemplate(ctx, hash) + if err == nil { + if err := init(template.Config.ConnectionString()); err != nil { + return err + } + + return c.FinalizeTemplate(ctx, hash) + } else if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { + return nil + } + + return err + +} + +func (c *Client) SetupTemplateWithDBClient(ctx context.Context, hash string, init func(db *sql.DB) error) error { + template, err := c.InitializeTemplate(ctx, hash) + if err != nil { + return err + } + + db, err := sql.Open("postgres", template.Config.ConnectionString()) + if err != nil { + return err + } + defer db.Close() + + if err := db.PingContext(ctx); err != nil { + return err + } + + if err := init(db); err != nil { + return err + } + + return c.FinalizeTemplate(ctx, 
hash)
+}
+
+func (c *Client) DiscardTemplate(ctx context.Context, hash string) error {
+ req, err := c.newRequest(ctx, "DELETE", fmt.Sprintf("/templates/%s", hash), nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.do(req, nil)
+ if err != nil {
+ return err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusNoContent:
+ return nil
+ case http.StatusNotFound:
+ return manager.ErrTemplateNotFound
+ case http.StatusServiceUnavailable:
+ return manager.ErrManagerNotReady
+ default:
+ return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status)
+ }
+}
+
+func (c *Client) FinalizeTemplate(ctx context.Context, hash string) error {
+ req, err := c.newRequest(ctx, "PUT", fmt.Sprintf("/templates/%s", hash), nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.do(req, nil)
+ if err != nil {
+ return err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusNoContent:
+ return nil
+ case http.StatusNotFound:
+ return manager.ErrTemplateNotFound
+ case http.StatusServiceUnavailable:
+ return manager.ErrManagerNotReady
+ default:
+ return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status)
+ }
+}
+
+func (c *Client) GetTestDatabase(ctx context.Context, hash string) (TestDatabase, error) {
+ var test TestDatabase
+
+ req, err := c.newRequest(ctx, "GET", fmt.Sprintf("/templates/%s/tests", hash), nil)
+ if err != nil {
+ return test, err
+ }
+
+ resp, err := c.do(req, &test)
+ if err != nil {
+ return test, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return test, nil
+ case http.StatusNotFound:
+ return test, manager.ErrTemplateNotFound
+ case http.StatusGone:
+ return test, manager.ErrTestNotFound
+ case http.StatusServiceUnavailable:
+ return test, manager.ErrManagerNotReady
+ default:
+ return test, fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status)
+ }
+}
+
+func (c *Client) ReturnTestDatabase(ctx context.Context, hash string, id int) error {
+ req, err := c.newRequest(ctx, "DELETE", fmt.Sprintf("/templates/%s/tests/%d", hash, id), nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.do(req, nil)
+ if err != nil {
+ return err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusNoContent:
+ return nil
+ case http.StatusNotFound:
+ return manager.ErrTemplateNotFound
+ case http.StatusServiceUnavailable:
+ return manager.ErrManagerNotReady
+ default:
+ return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status)
+ }
+}
+
+func (c *Client) newRequest(ctx context.Context, method string, endpoint string, body interface{}) (*http.Request, error) {
+ u := c.baseURL.ResolveReference(&url.URL{Path: path.Join(c.baseURL.Path, endpoint)})
+
+ var buf io.ReadWriter
+ if body != nil {
+ buf = new(bytes.Buffer)
+ if err := json.NewEncoder(buf).Encode(body); err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequestWithContext(ctx, method, u.String(), buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json; charset=UTF-8")
+ }
+
+ req.Header.Set("Accept", "application/json")
+
+ return req, nil
+}
+
+func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) {
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // the body must always be closed
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNoContent {
+ return resp, nil
+ }
+
+ // if the provided v pointer is nil we cannot unmarshal the body
to anything + if v == nil { + return resp, nil + } + + if err := json.NewDecoder(resp.Body).Decode(v); err != nil { + return nil, err + } + + return resp, err +} diff --git a/tests/testclient/models.go b/tests/testclient/models.go new file mode 100644 index 0000000..c93933a --- /dev/null +++ b/tests/testclient/models.go @@ -0,0 +1,56 @@ +package testclient + +import ( + "fmt" + "sort" + "strings" +) + +type TestDatabase struct { + Database `json:"database"` + + ID int `json:"id"` +} + +type TemplateDatabase struct { + Database `json:"database"` +} + +type Database struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` +} + +type DatabaseConfig struct { + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` + Database string `json:"database"` + AdditionalParams map[string]string `json:"additionalParams,omitempty"` // Optional additional connection parameters mapped into the connection string +} + +// Generates a connection string to be passed to sql.Open or equivalents, assuming Postgres syntax +func (c DatabaseConfig) ConnectionString() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", c.Host, c.Port, c.Username, c.Password, c.Database)) + + if _, ok := c.AdditionalParams["sslmode"]; !ok { + b.WriteString(" sslmode=disable") + } + + if len(c.AdditionalParams) > 0 { + params := make([]string, 0, len(c.AdditionalParams)) + for param := range c.AdditionalParams { + params = append(params, param) + } + + sort.Strings(params) + + for _, param := range params { + fmt.Fprintf(&b, " %s=%s", param, c.AdditionalParams[param]) + } + } + + return b.String() +} diff --git a/tmp/.gitignore b/tmp/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/tools.go b/tools.go index 37c9136..6517aa3 100644 --- a/tools.go +++ b/tools.go @@ -1,4 +1,4 @@ -// +build tools +//go:build tools // Tooling dependencies // https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module