diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bb5713b..9bbae88 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -31,12 +31,3 @@ jobs: - name: Run tests run: make test-integration - - golden: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Golden diff - working-directory: component/ - run: make test golden-diff-all diff --git a/.gitignore b/.gitignore index 4607f30..462ade6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# VSCode +.vscode/ + # Goreleaser /dist/ /.github/release-notes.md @@ -17,3 +20,8 @@ # Configuration env .env + +__debug* + +# Golden test that may contain live API keys for local testing +fromenv/ diff --git a/Makefile b/Makefile index 3cb0f29..59fb530 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ build-bin: fmt vet ## Build binary build-docker: ## Build docker image env CGO_ENABLED=0 GOOS=$(DOCKER_IMAGE_GOOS) GOARCH=$(DOCKER_IMAGE_GOARCH) \ go build -o ${BIN_FILENAME} - $(DOCKER_CMD) build -t $(CONTAINER_IMG) . + $(DOCKER_CMD) build --platform $(DOCKER_IMAGE_GOOS)/$(DOCKER_IMAGE_GOARCH) -t $(CONTAINER_IMG) . .PHONY: docs-serve docs-serve: ## Preview the documentation diff --git a/README.md b/README.md index 00e142f..336fef7 100644 --- a/README.md +++ b/README.md @@ -8,27 +8,35 @@ [build]: https://github.com/vshn/billing-collector-cloudservices/actions?query=workflow%3ATest [releases]: https://github.com/vshn/billing-collector-cloudservices/releases -Batch job to sync usage data from the Exoscale and Cloudscale API to the [APPUiO Cloud reporting](https://github.com/appuio/appuio-cloud-reporting/) database. +Batch job to sync usage data from the Exoscale and Cloudscale API to Odoo16. -Metrics are collected taking into account product (e.g. `object-storage-storage:exoscale`), source (e.g. `exoscale:namespace`), tenant (as organization) and date time. 
+Metrics are collected based on [metered billing data flow](https://docs.central.vshn.ch/metered-billing-data-flow.html) See the [component documentation](https://hub.syn.tools/billing-collector-cloudservices/index.html) for more information. +## APPUiO Cloud vs APPUiO Managed + +The tool operates in 2 modes - APPUiO Cloud and APPUiO Managed. +The mode is decided by the environment variable `APPUIO_MANAGED_SALES_ORDER`. +If the sales order is set, the tool assumes that the whole cluster is APPUiO Managed thus changing the business logic accordingly. + ## Getting started for developers In order to run this tool, you need -* An instance of the billing database * Access to the Exoscale and Cloudscale accounts which has the services to be invoiced * Access to the Kubernetes cluster which has the claims corresponding to the Exoscale services +* Access to Odoo16 where the metrics data is sent Get all this (see below), and put it all into an 'env' file: ``` export EXOSCALE_API_KEY="..." export EXOSCALE_API_SECRET="..." -export KUBERNETES_SERVER_URL='https://...' -export KUBERNETES_SERVER_TOKEN='...' -export ACR_DB_URL="postgres://reporting:reporting@localhost/appuio-cloud-reporting-test?sslmode=disable" +export APPUIO_MANAGED_SALES_ORDER="..." +export ODOO_OAUTH_CLIENT_SECRET="..." +export ODOO_OAUTH_TOKEN_URL="..." +export ODOO_URL="..." +export CLUSTER_ID="..." 
``` Then source the env file and run the client: @@ -40,24 +48,11 @@ $ make build Then, run one of the available commands: -* Object Storage: -``` -$ ./billing-collector-cloudservices exoscale objectstorage -``` - -* DBaaS (runs metrics collector for all supported databases): +* Exoscale DBaaS: ``` $ ./billing-collector-cloudservices exoscale dbaas ``` -### Billing Database - -Provided that you have Docker installed, you can easily run a local instance of the billing database by getting the [appuio-cloud-reporting](https://github.com/appuio/appuio-cloud-reporting/) repository and running: - -``` -$ make start-acr -``` - ### Create Resources in Lab Cluster to test metrics collector You can first connect to your cluster and then create a claim for Postgres Database by applying a claim, for example: @@ -70,20 +65,13 @@ metadata: name: exoscale-postgres-lab-test-1 spec: parameters: - backup: - timeOfDay: '13:00:00' - maintenance: - dayOfWeek: monday - timeOfDay: "12:00:00" size: plan: hobbyist-2 - service: - majorVersion: "14" writeConnectionSecretToRef: name: postgres-connection-details ``` -Once the database is created and `Ready`, you can run locally the command: +Once the database is created and `Ready`, you can run locally the command with the required env variables: ``` $ ./billing-collector-cloudservices exoscale dbaas ``` @@ -98,36 +86,9 @@ You can get your Exoscale API key and secret from the Exoscale web UI. Be sure t The token should be restricted to the 'sos' and 'dbaas' services. -### Kubernetes API token - -The following instructions work for OpenShift via the 'oc' utility. Not all of them will work with kubectl. - -The commands assume that you are logged in to the Kubernetes cluster you want to use, and your working directory needs to be this git repository. 
- -Instructions for OpenShift >=4.11: -``` -$ cd billing-collector-cloudservices -$ oc -n default --as cluster-admin apply -f clusterrole.yaml -$ oc -n default --as cluster-admin create serviceaccount vshn-billing-collector-cloudservices -$ oc --as cluster-admin adm policy add-cluster-role-to-user vshn-billing-collector-cloudservices system:serviceaccount:default:vshn-billing-collector-cloudservices -$ oc -n default --as cluster-admin apply -f clusterrole-secret.yaml -$ oc -n default --as cluster-admin get secret vshn-billing-collector-cloudservices-secret -o jsonpath='{.data.token}' | base64 -d -``` - -Instructions for OpenShift <=4.10: -``` -$ cd billing-collector-cloudservices -$ oc -n default --as cluster-admin apply -f clusterrole.yaml -$ oc -n default --as cluster-admin create serviceaccount vshn-billing-collector-cloudservices -$ oc --as cluster-admin adm policy add-cluster-role-to-user vshn-billing-collector-cloudservices system:serviceaccount:default:vshn-billing-collector-cloudservices -$ oc -n default --as cluster-admin serviceaccounts get-token vshn-billing-collector-cloudservices -``` - -The last command will print out your token without trailing newline; be sure to copy the correct part of the output. - ### Integration tests -Integration tests create an envtest cluster and store data in an ACR (appuio-cloud-reporting) database. This is all automated when running: +Integration tests create an envtest cluster and export the metrics locally. This is all automated when running: ```bash $ make test-integration @@ -136,11 +97,6 @@ $ make test-integration To run integration tests in your IDE of choice, be sure to set build tag `integration` and the following env variables: ```bash -ACR_DB_URL=postgres://reporting:reporting@localhost/appuio-cloud-reporting-test?sslmode=disable -CLOUDSCALE_API_TOKEN= -EXOSCALE_API_KEY= -EXOSCALE_API_SECRET= - # path to directory where the respective go modules are installed. 
You can also specify the path to the local clone of the respective repositories. EXOSCALE_CRDS_PATH="$(go list -f '{{.Dir}}' -m github.com/vshn/provider-exoscale)/package/crds)" CLOUDSCALE_CRDS_PATH="$(go list -f '{{.Dir}}' -m github.com/vshn/provider-cloudscale)/package/crds)" diff --git a/component/Makefile b/component/Makefile deleted file mode 100644 index 9a926b2..0000000 --- a/component/Makefile +++ /dev/null @@ -1,85 +0,0 @@ -MAKEFLAGS += --warn-undefined-variables -SHELL := bash -.SHELLFLAGS := -eu -o pipefail -c -.DEFAULT_GOAL := all -.DELETE_ON_ERROR: -.SUFFIXES: - -include Makefile.vars.mk - -.PHONY: help -help: ## Show this help - @grep -E -h '\s##\s' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = "(: ).*?## "}; {gsub(/\\:/,":", $$1)}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' - -.PHONY: all -all: lint - -.PHONY: lint -lint: lint_jsonnet lint_yaml lint_adoc lint_kubent ## All-in-one linting - -.PHONY: lint_jsonnet -lint_jsonnet: $(JSONNET_FILES) ## Lint jsonnet files - $(JSONNET_DOCKER) $(JSONNETFMT_ARGS) --test -- $? - -.PHONY: lint_yaml -lint_yaml: ## Lint yaml files - $(YAMLLINT_DOCKER) -f parsable -c $(YAMLLINT_CONFIG) $(YAMLLINT_ARGS) -- . - -.PHONY: lint_adoc -lint_adoc: ## Lint documentation - $(VALE_CMD) $(VALE_ARGS) -.PHONY: lint_kubent -lint_kubent: ## Check for deprecated Kubernetes API versions - $(KUBENT_DOCKER) $(KUBENT_ARGS) -f $(KUBENT_FILES) - -.PHONY: format -format: format_jsonnet ## All-in-one formatting - -.PHONY: format_jsonnet -format_jsonnet: $(JSONNET_FILES) ## Format jsonnet files - $(JSONNET_DOCKER) $(JSONNETFMT_ARGS) -- $? 
- -.PHONY: docs-serve -docs-serve: ## Preview the documentation - $(ANTORA_PREVIEW_CMD) - -.PHONY: compile -.compile: - mkdir -p dependencies - $(COMPILE_CMD) - -.PHONY: test -test: commodore_args += -f tests/$(instance).yml -test: .compile ## Compile the component - -.PHONY: gen-golden -gen-golden: commodore_args += -f tests/$(instance).yml -gen-golden: clean .compile ## Update the reference version for target `golden-diff`. - @rm -rf tests/golden/$(instance) - @mkdir -p tests/golden/$(instance) - @cp -R compiled/. tests/golden/$(instance)/. - -.PHONY: golden-diff -golden-diff: commodore_args += -f tests/$(instance).yml -golden-diff: clean .compile ## Diff compile output against the reference version. Review output and run `make gen-golden golden-diff` if this target fails. - @git diff --exit-code --minimal --no-index -- tests/golden/$(instance) compiled/ - -.PHONY: golden-diff-all -golden-diff-all: recursive_target=golden-diff -golden-diff-all: $(test_instances) ## Run golden-diff for all instances. Note: this doesn't work when running make with multiple parallel jobs (-j != 1). - -.PHONY: gen-golden-all -gen-golden-all: recursive_target=gen-golden -gen-golden-all: $(test_instances) ## Run gen-golden for all instances. Note: this doesn't work when running make with multiple parallel jobs (-j != 1). - -.PHONY: lint_kubent_all -lint_kubent_all: recursive_target=lint_kubent -lint_kubent_all: $(test_instances) ## Lint deprecated Kubernetes API versions for all golden test instances. Will exit on first error. Note: this doesn't work when running make with multiple parallel jobs (-j != 1). 
- -.PHONY: $(test_instances) -$(test_instances): - $(MAKE) $(recursive_target) -e instance=$(basename $(@F)) - -.PHONY: clean -clean: ## Clean the project - rm -rf .cache compiled dependencies vendor helmcharts jsonnetfile*.json || true diff --git a/component/Makefile.vars.mk b/component/Makefile.vars.mk deleted file mode 100644 index 72f6a2a..0000000 --- a/component/Makefile.vars.mk +++ /dev/null @@ -1,47 +0,0 @@ -# Commodore takes the root dir name as the component name -COMPONENT_NAME ?= billing-collector-cloudservices -COMPONENT_SUBDIR ?= $(shell basename ${PWD}) - -compiled_path ?= compiled/$(COMPONENT_NAME)/$(COMPONENT_NAME) -root_volume ?= -v "$${PWD}/../:/$(COMPONENT_NAME)" -compiled_volume ?= -v "$${PWD}/$(compiled_path):/$(COMPONENT_NAME)" -commodore_args ?= --search-paths . -n $(COMPONENT_NAME) --alias $(instance) - -ifneq "$(shell which docker 2>/dev/null)" "" - DOCKER_CMD ?= $(shell which docker) - DOCKER_USERNS ?= "" -else - DOCKER_CMD ?= podman - DOCKER_USERNS ?= keep-id -endif -DOCKER_ARGS ?= run --rm -u "$$(id -u):$$(id -g)" --userns=$(DOCKER_USERNS) -w /$(COMPONENT_NAME)/$(COMPONENT_SUBDIR) -e HOME="/$(COMPONENT_NAME)" - -JSONNET_FILES ?= $(shell find . 
-type f -not -path './vendor/*' \( -name '*.*jsonnet' -or -name '*.libsonnet' \)) -JSONNETFMT_ARGS ?= --in-place --pad-arrays -JSONNET_IMAGE ?= docker.io/bitnami/jsonnet:latest -JSONNET_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) --entrypoint=jsonnetfmt $(JSONNET_IMAGE) - -YAMLLINT_ARGS ?= --no-warnings -YAMLLINT_CONFIG ?= .yamllint.yml -YAMLLINT_IMAGE ?= docker.io/cytopia/yamllint:latest -YAMLLINT_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) $(YAMLLINT_IMAGE) - -VALE_CMD ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) --volume "$${PWD}"/docs/modules:/pages docker.io/vshn/vale:2.1.1 -VALE_ARGS ?= --minAlertLevel=error --config=/pages/ROOT/pages/.vale.ini /pages - -ANTORA_PREVIEW_CMD ?= $(DOCKER_CMD) run --rm --publish 35729:35729 --publish 2020:2020 --volume "${PWD}/.git":/preview/antora/.git --volume "${PWD}/docs":/preview/antora/docs docker.io/vshn/antora-preview:3.0.1.1 --style=syn --antora=docs - - -COMMODORE_CMD ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) docker.io/projectsyn/commodore:latest -COMPILE_CMD ?= $(COMMODORE_CMD) component compile . 
$(commodore_args) -JB_CMD ?= $(DOCKER_CMD) $(DOCKER_ARGS) --entrypoint /usr/local/bin/jb docker.io/projectsyn/commodore:latest install -GOLDEN_FILES ?= $(shell find tests/golden/$(instance) -type f) - -KUBENT_FILES ?= $(shell echo "$(GOLDEN_FILES)" | sed 's/ /,/g') -KUBENT_ARGS ?= -c=false --helm2=false --helm3=false -e -# Use our own kubent image until the upstream image is available -KUBENT_IMAGE ?= docker.io/projectsyn/kubent:latest -KUBENT_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) --entrypoint=/app/kubent $(KUBENT_IMAGE) - -instance ?= billing-collector-cloudservices -test_instances = tests/billing-collector-cloudservices.yml tests/exoscale-metrics-collector.yml tests/collector-cloudscale-lpg-2.yml tests/cloudscale-metrics-collector.yml diff --git a/component/class/billing-collector-cloudservices.yml b/component/class/billing-collector-cloudservices.yml deleted file mode 100644 index 355d9ab..0000000 --- a/component/class/billing-collector-cloudservices.yml +++ /dev/null @@ -1,13 +0,0 @@ -parameters: - kapitan: - compile: - - input_paths: - - ${_base_directory}/component/app.jsonnet - input_type: jsonnet - output_path: apps/ - output_type: yaml - - input_paths: - - ${_base_directory}/component/main.jsonnet - input_type: jsonnet - output_type: yaml - output_path: ${_instance} diff --git a/component/class/defaults.yml b/component/class/defaults.yml deleted file mode 100644 index 93e0bb8..0000000 --- a/component/class/defaults.yml +++ /dev/null @@ -1,55 +0,0 @@ -parameters: - billing_collector_cloudservices: - =_metadata: - multi_instance: true - - namespace: appuio-cloud-reporting - - database: ${appuio_cloud_reporting:database} - database_secret: ${appuio_cloud_reporting:database_secret} - database_env: ${appuio_cloud_reporting:database_env} - extra_volumes: ${appuio_cloud_reporting:extra_volumes} - - cloud_reporting_dbsecret_name: reporting-db - - secrets: - exoscale: - credentials: - stringData: - EXOSCALE_API_KEY: 
"?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/exoscale-key}" - EXOSCALE_API_SECRET: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/exoscale-secret}" - KUBERNETES_SERVER_URL: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/cluster-server}" - KUBERNETES_SERVER_TOKEN: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/cluster-token}" - cloudscale: - credentials: - stringData: - CLOUDSCALE_API_TOKEN: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/token}" - KUBERNETES_SERVER_URL: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/cluster-server}" - KUBERNETES_SERVER_TOKEN: "?{vaultkv:${cluster:tenant}/${cluster:name}/billing-collector-cloudservices/${_instance}/cluster-token}" - images: - collector: - registry: 'ghcr.io' - repository: 'vshn/billing-collector-cloudservices' - tag: v1.0.3 - - exoscale: - enabled: false - objectStorage: - enabled: true - # Times in UTC! Don't run job around midnight as exoscale API may return incomplete data - # schedule for objectstorage cronjob - # default: Every day at minute 10 past hour 10, 16 and 20. - schedule: '10 10,16,20 * * *' - - dbaas: - # enable DBaaS cronjob in addition to objectstorage cronjob. - enabled: false - # schedule for DBaaS cronjob every 15min - schedule: '*/15 * * * *' - - cloudscale: - enabled: false - objectStorage: - enabled: true - # Times in UTC! 
- schedule: '10 4,10,16 * * *' diff --git a/component/component/app.jsonnet b/component/component/app.jsonnet deleted file mode 100644 index 4420ae1..0000000 --- a/component/component/app.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local kap = import 'lib/kapitan.libjsonnet'; -local inv = kap.inventory(); -local paramsACR = inv.parameters.appuio_cloud_reporting; -local argocd = import 'lib/argocd.libjsonnet'; - -local instance = inv.parameters._instance; -local app = argocd.App(instance, paramsACR.namespace); - -{ - [instance]: app, -} diff --git a/component/component/main.jsonnet b/component/component/main.jsonnet deleted file mode 100644 index 535f6d0..0000000 --- a/component/component/main.jsonnet +++ /dev/null @@ -1,137 +0,0 @@ -local kap = import 'lib/kapitan.libjsonnet'; -local inv = kap.inventory(); -local params = inv.parameters.billing_collector_cloudservices; -local kube = import 'lib/kube.libjsonnet'; -local com = import 'lib/commodore.libjsonnet'; -local collectorImage = '%(registry)s/%(repository)s:%(tag)s' % params.images.collector; -local alias = inv.parameters._instance; -local alias_suffix = '-' + alias; -local credentials_secret_name = 'credentials' + alias_suffix; -local component_name = 'billing-collector-cloudservices'; - -assert std.member(inv.applications, 'appuio-cloud-reporting') : 'Component appuio-cloud-reporting must be installed'; - -local labels = { - 'app.kubernetes.io/name': component_name, - 'app.kubernetes.io/managed-by': 'commodore', - 'app.kubernetes.io/part-of': 'appuio-cloud-reporting', - 'app.kubernetes.io/component': component_name, -}; - -local secret(key) = [ - if params.secrets[key][s] != null then - kube.Secret(s + alias_suffix) { - metadata+: { - namespace: params.namespace, - }, - } + com.makeMergeable(params.secrets[key][s]) - for s in std.objectFields(params.secrets[key]) -]; - -local dbEnv = [ - { - name: name, - valueFrom: { - secretKeyRef: { - name: params.cloud_reporting_dbsecret_name, - key: name, - }, - }, - } - for 
name in std.objectFields(params.database_secret) -] + [ - { - name: name, - [if std.type(params.database_env[name]) == 'string' then 'value' else 'valueFrom']: params.database_env[name], - } - for name in std.objectFields(params.database_env) -] + [ - assert params.database.url != null : 'database.url must be set.'; - { - name: 'DB_PARAMS', - value: params.database.parameters, - }, - { - name: 'ACR_DB_URL', - value: params.database.url, - }, -]; - -local cronjob(name, args, schedule) = { - kind: 'CronJob', - apiVersion: 'batch/v1', - metadata: { - name: name, - namespace: params.namespace, - labels+: labels, - }, - spec: { - concurrencyPolicy: 'Forbid', - failedJobsHistoryLimit: 5, - jobTemplate: { - spec: { - template: { - spec: { - restartPolicy: 'OnFailure', - containers: [ - { - name: 'billing-collector-cloudservices-backfill', - image: collectorImage, - args: args, - envFrom: [ - { - secretRef: { - name: credentials_secret_name, - }, - }, - ], - env: dbEnv, - resources: {}, - [if std.length(params.extra_volumes) > 0 then 'volumeMounts']: [ - { name: name } + params.extra_volumes[name].mount_spec - for name in std.objectFields(params.extra_volumes) - ], - }, - ], - [if std.length(params.extra_volumes) > 0 then 'volumes']: [ - { name: name } + params.extra_volumes[name].volume_spec - for name in std.objectFields(params.extra_volumes) - ], - }, - }, - }, - }, - schedule: schedule, - successfulJobsHistoryLimit: 3, - }, -}; - -assert params.exoscale.enabled != params.cloudscale.enabled : 'only one of the components can be enabled: cloudscale or exoscale. 
not both and not neither.'; - -(if params.exoscale.enabled then { - local secrets = params.secrets.exoscale, - assert secrets != null : 'secrets must be set.', - assert secrets.credentials != null : 'secrets.credentials must be set.', - assert secrets.credentials.stringData != null : 'secrets.credentials.stringData must be set.', - assert secrets.credentials.stringData.EXOSCALE_API_KEY != null : 'secrets.credentials.stringData.EXOSCALE_API_KEY must be set.', - assert secrets.credentials.stringData.EXOSCALE_API_SECRET != null : 'secrets.credentials.stringData.EXOSCALE_API_SECRET must be set.', - assert secrets.credentials.stringData.KUBERNETES_SERVER_URL != null : 'secrets.credentials.stringData.KUBERNETES_SERVER_URL must be set.', - assert secrets.credentials.stringData.KUBERNETES_SERVER_TOKEN != null : 'secrets.credentials.stringData.KUBERNETES_SERVER_TOKEN must be set.', - - secrets: std.filter(function(it) it != null, secret('exoscale')), - objectStorageCronjob: cronjob(alias + '-objectstorage', [ 'exoscale', 'objectstorage' ], params.exoscale.objectStorage.schedule), - [if params.exoscale.dbaas.enabled then 'dbaasCronjob']: cronjob(alias + '-dbaas', [ 'exoscale', 'dbaas' ], params.exoscale.dbaas.schedule), - } else {}) -+ -(if params.cloudscale.enabled then { - local secrets = params.secrets.cloudscale, - assert secrets != null : 'secrets must be set.', - assert secrets.credentials != null : 'secrets.credentials must be set.', - assert secrets.credentials.stringData != null : 'secrets.credentials.stringData must be set.', - assert secrets.credentials.stringData.CLOUDSCALE_API_TOKEN != null : 'secrets.credentials.stringData.CLOUDSCALE_API_TOKEN must be set.', - assert secrets.credentials.stringData.KUBERNETES_SERVER_URL != null : 'secrets.credentials.stringData.KUBERNETES_SERVER_URL must be set.', - assert secrets.credentials.stringData.KUBERNETES_SERVER_TOKEN != null : 'secrets.credentials.stringData.KUBERNETES_SERVER_TOKEN must be set.', - - secrets: 
std.filter(function(it) it != null, secret('cloudscale')), - [if params.cloudscale.objectStorage.enabled then 'objectStorageCronjob']: cronjob(alias + '-objectstorage', [ 'cloudscale', 'objectstorage' ], params.cloudscale.objectStorage.schedule), - } else {}) diff --git a/component/tests/billing-collector-cloudservices.yml b/component/tests/billing-collector-cloudservices.yml deleted file mode 100644 index d03688b..0000000 --- a/component/tests/billing-collector-cloudservices.yml +++ /dev/null @@ -1,23 +0,0 @@ -applications: - - appuio-cloud-reporting - -parameters: - appuio_cloud_reporting: - database: - url: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - parameters: '' - database_secret: - DB_USER: appuio-cloud-reporting - DB_PASSWORD: letmein - DB_HOST: db.example.com - DB_PORT: 5432 - DB_NAME: appuio-cloud-reporting - database_env: {} - - extra_volumes: {} - - billing_collector_cloudservices: - exoscale: - enabled: true - dbaas: - enabled: true diff --git a/component/tests/cloudscale-metrics-collector.yml b/component/tests/cloudscale-metrics-collector.yml deleted file mode 100644 index e0dfc9e..0000000 --- a/component/tests/cloudscale-metrics-collector.yml +++ /dev/null @@ -1,46 +0,0 @@ -applications: - - appuio-cloud-reporting - -parameters: - appuio_cloud_reporting: - database: - url: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - parameters: '' - database_secret: {} - database_env: - DB_USER: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_USER - DB_PASSWORD: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_PASSWORD - DB_HOST: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_HOST - DB_PORT: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_PORT - DB_NAME: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_DB - - extra_volumes: - dbsecret: - mount_spec: - readOnly: true - mountPath: /secrets/database - 
volume_spec: - secret: - secretName: reporting-db-prod-cred - defaultMode: 0600 - - billing_collector_cloudservices: - cloudscale: - enabled: true - objectStorage: - enabled: true diff --git a/component/tests/collector-cloudscale-lpg-2.yml b/component/tests/collector-cloudscale-lpg-2.yml deleted file mode 100644 index f31d0fd..0000000 --- a/component/tests/collector-cloudscale-lpg-2.yml +++ /dev/null @@ -1,22 +0,0 @@ -applications: - - metrics-collector as collector-cloudscale-lpg-2 - - appuio-cloud-reporting - -parameters: - appuio_cloud_reporting: - database: - url: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - parameters: '' - database_secret: - DB_USER: appuio-cloud-reporting - DB_PASSWORD: letmein - DB_HOST: db.example.com - DB_PORT: 5432 - DB_NAME: appuio-cloud-reporting - database_env: {} - - extra_volumes: {} - - billing_collector_cloudservices: - exoscale: - enabled: true diff --git a/component/tests/exoscale-metrics-collector.yml b/component/tests/exoscale-metrics-collector.yml deleted file mode 100644 index 66831c6..0000000 --- a/component/tests/exoscale-metrics-collector.yml +++ /dev/null @@ -1,46 +0,0 @@ -applications: - - appuio-cloud-reporting - -parameters: - appuio_cloud_reporting: - database: - url: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - parameters: '' - database_secret: {} - database_env: - DB_USER: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_USER - DB_PASSWORD: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_PASSWORD - DB_HOST: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_HOST - DB_PORT: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_PORT - DB_NAME: - secretKeyRef: - name: reporting-db-prod-cred - key: POSTGRESQL_DB - - extra_volumes: - dbsecret: - mount_spec: - readOnly: true - mountPath: /secrets/database - volume_spec: - secret: - secretName: reporting-db-prod-cred - defaultMode: 0600 
- - billing_collector_cloudservices: - exoscale: - enabled: true - dbaas: - enabled: true diff --git a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/apps/billing-collector-cloudservices.yaml b/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/apps/billing-collector-cloudservices.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/dbaasCronjob.yaml b/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/dbaasCronjob.yaml deleted file mode 100644 index 57c1dcc..0000000 --- a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/dbaasCronjob.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: billing-collector-cloudservices-dbaas - namespace: appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: - - args: - - exoscale - - dbaas - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: DB_HOST - name: reporting-db - - name: DB_NAME - valueFrom: - secretKeyRef: - key: DB_NAME - name: reporting-db - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: DB_PASSWORD - name: reporting-db - - name: DB_PORT - valueFrom: - secretKeyRef: - key: DB_PORT - name: reporting-db - - name: DB_USER - valueFrom: - secretKeyRef: - key: DB_USER - name: reporting-db - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: 
postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-billing-collector-cloudservices - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - restartPolicy: OnFailure - schedule: '*/15 * * * *' - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/objectStorageCronjob.yaml b/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/objectStorageCronjob.yaml deleted file mode 100644 index a447de2..0000000 --- a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/objectStorageCronjob.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: billing-collector-cloudservices-objectstorage - namespace: appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: - - args: - - exoscale - - objectstorage - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: DB_HOST - name: reporting-db - - name: DB_NAME - valueFrom: - secretKeyRef: - key: DB_NAME - name: reporting-db - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: DB_PASSWORD - name: reporting-db - - name: DB_PORT - valueFrom: - secretKeyRef: - key: DB_PORT - name: reporting-db - - name: DB_USER - valueFrom: - secretKeyRef: - key: DB_USER - name: reporting-db - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: 
postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-billing-collector-cloudservices - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - restartPolicy: OnFailure - schedule: 10 10,16,20 * * * - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/secrets.yaml b/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/secrets.yaml deleted file mode 100644 index d3eaf10..0000000 --- a/component/tests/golden/billing-collector-cloudservices/billing-collector-cloudservices/billing-collector-cloudservices/secrets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -data: {} -kind: Secret -metadata: - annotations: {} - labels: - name: credentials-billing-collector-cloudservices - name: credentials-billing-collector-cloudservices - namespace: appuio-cloud-reporting -stringData: - EXOSCALE_API_KEY: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/billing-collector-cloudservices/exoscale-key - EXOSCALE_API_SECRET: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/billing-collector-cloudservices/exoscale-secret - KUBERNETES_SERVER_TOKEN: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/billing-collector-cloudservices/cluster-token - KUBERNETES_SERVER_URL: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/billing-collector-cloudservices/cluster-server -type: Opaque diff --git a/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/apps/cloudscale-metrics-collector.yaml b/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/apps/cloudscale-metrics-collector.yaml deleted file mode 100644 index e69de29..0000000 diff --git 
a/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/objectStorageCronjob.yaml b/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/objectStorageCronjob.yaml deleted file mode 100644 index 570ff2c..0000000 --- a/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/objectStorageCronjob.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: cloudscale-metrics-collector-objectstorage - namespace: appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: - - args: - - cloudscale - - objectstorage - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: POSTGRESQL_HOST - name: reporting-db-prod-cred - - name: DB_NAME - valueFrom: - secretKeyRef: - key: POSTGRESQL_DB - name: reporting-db-prod-cred - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: POSTGRESQL_PASSWORD - name: reporting-db-prod-cred - - name: DB_PORT - valueFrom: - secretKeyRef: - key: POSTGRESQL_PORT - name: reporting-db-prod-cred - - name: DB_USER - valueFrom: - secretKeyRef: - key: POSTGRESQL_USER - name: reporting-db-prod-cred - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-cloudscale-metrics-collector - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - volumeMounts: - - mountPath: /secrets/database - name: dbsecret - readOnly: true - restartPolicy: OnFailure - 
volumes: - - name: dbsecret - secret: - defaultMode: 384 - secretName: reporting-db-prod-cred - schedule: 10 4,10,16 * * * - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/secrets.yaml b/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/secrets.yaml deleted file mode 100644 index 2c4a4da..0000000 --- a/component/tests/golden/cloudscale-metrics-collector/cloudscale-metrics-collector/cloudscale-metrics-collector/secrets.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -data: {} -kind: Secret -metadata: - annotations: {} - labels: - name: credentials-cloudscale-metrics-collector - name: credentials-cloudscale-metrics-collector - namespace: appuio-cloud-reporting -stringData: - CLOUDSCALE_API_TOKEN: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/cloudscale-metrics-collector/token - KUBERNETES_SERVER_TOKEN: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/cloudscale-metrics-collector/cluster-token - KUBERNETES_SERVER_URL: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/cloudscale-metrics-collector/cluster-server -type: Opaque diff --git a/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/apps/collector-cloudscale-lpg-2.yaml b/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/apps/collector-cloudscale-lpg-2.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/objectStorageCronjob.yaml b/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/objectStorageCronjob.yaml deleted file mode 100644 index d3b98a8..0000000 --- 
a/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/objectStorageCronjob.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: collector-cloudscale-lpg-2-objectstorage - namespace: appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: - - args: - - exoscale - - objectstorage - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: DB_HOST - name: reporting-db - - name: DB_NAME - valueFrom: - secretKeyRef: - key: DB_NAME - name: reporting-db - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: DB_PASSWORD - name: reporting-db - - name: DB_PORT - valueFrom: - secretKeyRef: - key: DB_PORT - name: reporting-db - - name: DB_USER - valueFrom: - secretKeyRef: - key: DB_USER - name: reporting-db - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-collector-cloudscale-lpg-2 - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - restartPolicy: OnFailure - schedule: 10 10,16,20 * * * - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/secrets.yaml b/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/secrets.yaml deleted file mode 100644 index 5c039ed..0000000 --- a/component/tests/golden/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/collector-cloudscale-lpg-2/secrets.yaml +++ /dev/null @@ -1,15 +0,0 @@ 
-apiVersion: v1 -data: {} -kind: Secret -metadata: - annotations: {} - labels: - name: credentials-collector-cloudscale-lpg-2 - name: credentials-collector-cloudscale-lpg-2 - namespace: appuio-cloud-reporting -stringData: - EXOSCALE_API_KEY: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/collector-cloudscale-lpg-2/exoscale-key - EXOSCALE_API_SECRET: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/collector-cloudscale-lpg-2/exoscale-secret - KUBERNETES_SERVER_TOKEN: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/collector-cloudscale-lpg-2/cluster-token - KUBERNETES_SERVER_URL: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/collector-cloudscale-lpg-2/cluster-server -type: Opaque diff --git a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/apps/exoscale-metrics-collector.yaml b/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/apps/exoscale-metrics-collector.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/dbaasCronjob.yaml b/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/dbaasCronjob.yaml deleted file mode 100644 index 0d81508..0000000 --- a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/dbaasCronjob.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: exoscale-metrics-collector-dbaas - namespace: appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: 
- - args: - - exoscale - - dbaas - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: POSTGRESQL_HOST - name: reporting-db-prod-cred - - name: DB_NAME - valueFrom: - secretKeyRef: - key: POSTGRESQL_DB - name: reporting-db-prod-cred - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: POSTGRESQL_PASSWORD - name: reporting-db-prod-cred - - name: DB_PORT - valueFrom: - secretKeyRef: - key: POSTGRESQL_PORT - name: reporting-db-prod-cred - - name: DB_USER - valueFrom: - secretKeyRef: - key: POSTGRESQL_USER - name: reporting-db-prod-cred - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-exoscale-metrics-collector - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - volumeMounts: - - mountPath: /secrets/database - name: dbsecret - readOnly: true - restartPolicy: OnFailure - volumes: - - name: dbsecret - secret: - defaultMode: 384 - secretName: reporting-db-prod-cred - schedule: '*/15 * * * *' - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/objectStorageCronjob.yaml b/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/objectStorageCronjob.yaml deleted file mode 100644 index c86b47f..0000000 --- a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/objectStorageCronjob.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - labels: - app.kubernetes.io/component: billing-collector-cloudservices - app.kubernetes.io/managed-by: commodore - app.kubernetes.io/name: billing-collector-cloudservices - app.kubernetes.io/part-of: appuio-cloud-reporting - name: exoscale-metrics-collector-objectstorage - namespace: 
appuio-cloud-reporting -spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 5 - jobTemplate: - spec: - template: - spec: - containers: - - args: - - exoscale - - objectstorage - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - key: POSTGRESQL_HOST - name: reporting-db-prod-cred - - name: DB_NAME - valueFrom: - secretKeyRef: - key: POSTGRESQL_DB - name: reporting-db-prod-cred - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - key: POSTGRESQL_PASSWORD - name: reporting-db-prod-cred - - name: DB_PORT - valueFrom: - secretKeyRef: - key: POSTGRESQL_PORT - name: reporting-db-prod-cred - - name: DB_USER - valueFrom: - secretKeyRef: - key: POSTGRESQL_USER - name: reporting-db-prod-cred - - name: DB_PARAMS - value: '' - - name: ACR_DB_URL - value: postgres://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?$(DB_PARAMS) - envFrom: - - secretRef: - name: credentials-exoscale-metrics-collector - image: ghcr.io/vshn/billing-collector-cloudservices:v1.0.3 - name: billing-collector-cloudservices-backfill - resources: {} - volumeMounts: - - mountPath: /secrets/database - name: dbsecret - readOnly: true - restartPolicy: OnFailure - volumes: - - name: dbsecret - secret: - defaultMode: 384 - secretName: reporting-db-prod-cred - schedule: 10 10,16,20 * * * - successfulJobsHistoryLimit: 3 diff --git a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/secrets.yaml b/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/secrets.yaml deleted file mode 100644 index 717c4db..0000000 --- a/component/tests/golden/exoscale-metrics-collector/exoscale-metrics-collector/exoscale-metrics-collector/secrets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -data: {} -kind: Secret -metadata: - annotations: {} - labels: - name: credentials-exoscale-metrics-collector - name: credentials-exoscale-metrics-collector - namespace: appuio-cloud-reporting -stringData: - 
EXOSCALE_API_KEY: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/exoscale-metrics-collector/exoscale-key - EXOSCALE_API_SECRET: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/exoscale-metrics-collector/exoscale-secret - KUBERNETES_SERVER_TOKEN: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/exoscale-metrics-collector/cluster-token - KUBERNETES_SERVER_URL: t-silent-test-1234/c-green-test-1234/billing-collector-cloudservices/exoscale-metrics-collector/cluster-server -type: Opaque diff --git a/docs/modules/ROOT/pages/explanations/data-usage.adoc b/docs/modules/ROOT/pages/explanations/data-usage.adoc index 0b1cf2f..10c37b6 100644 --- a/docs/modules/ROOT/pages/explanations/data-usage.adoc +++ b/docs/modules/ROOT/pages/explanations/data-usage.adoc @@ -1,5 +1,7 @@ = Data Usage +WARNING: This page is outdated and the information below is not correct anymore. + This page gives a brief overview how resources data usage (e.g. buckets) is saved to the postgres billing database. == Data flow diff --git a/docs/modules/ROOT/pages/how-tos/installation.adoc b/docs/modules/ROOT/pages/how-tos/installation.adoc index cb6fa2a..5895b2f 100644 --- a/docs/modules/ROOT/pages/how-tos/installation.adoc +++ b/docs/modules/ROOT/pages/how-tos/installation.adoc @@ -2,13 +2,11 @@ == Requirements -This component requires https://github.com/appuio/component-appuio-cloud-reporting[component-appuio-cloud-reporting] and is installed into the same namespace. -This is required for this component to be able to access the billing database and its connection secrets. -It also requires an Exoscale IAMKey and a Kubernetes/OpenShift Service Account token in the target cluster to get resources (e.g. SOS buckets, Postgres, etc). +This controller is deployed with https://github.com/vshn/component-appcat[component-appcat]. == Sources -The data is matched from a k8s cluster and an Exoscale organization. 
+The data is matched from the local k8s cluster and the associated Exoscale organization. The Kubernetes Service Account token is required to have `get` permissions on `Namespaces` and to the following managed resources: * `buckets.exoscale.crossplane.io` @@ -20,27 +18,3 @@ The Kubernetes Service Account token is required to have `get` permissions on `N === Exoscale organization The Access Key (IAM Key) from an Exoscale organization is required to have read access across all managed resources (e.g. SOS buckets, Postgres, etc). - -=== K8s cluster -The access to the desired cluster from where the metrics should be collected has to be configured. The current configuration for each instance is done in the -`syn-tenant-repo` under `manifests//billing` - -== Example - -[source,yaml] ----- -applications: - - billing-collector-cloudservices - -parameters: - metrics_collector: - exoscale: - enabled: true - objectStorage: - schedule: '10 10,16,20 * * *' - dbaas: - enabled: true # must be manually enabled to scrape DBaas metrics - schedule: '*/15 * * * *' ----- - -See the xref:references/parameters.adoc[parameters] reference for a full list of parameters. diff --git a/docs/modules/ROOT/pages/how-tos/multi-instance.adoc b/docs/modules/ROOT/pages/how-tos/multi-instance.adoc deleted file mode 100644 index 7205459..0000000 --- a/docs/modules/ROOT/pages/how-tos/multi-instance.adoc +++ /dev/null @@ -1,44 +0,0 @@ -= Deploy multiple instances - -This guide provides an example how to deploy multiple instances of this component. - -== Requirements - - -. 
Prepare catalog by configuring 2 instances -+ -[source,yaml] ----- -applications: - - billing-collector-cloudservices as collector-exoscale-ch-gva-2 <1> - - billing-collector-cloudservices as collector-cloudscale-rma-0 <2> -parameters: - appuio_cloud_reporting: - namespace: 'appuio-cloud-reporting' - database: - name: 'reporting' - host: 'reporting-db.appuio-reporting.svc' - parameters: 'sslmode=disable' - password: 'passw0rd' - port: 5432 ----- -<1> Instance one with alias name collector-exoscale-ch-gva-2 -<2> Instance two with alias name collector-cloudscale-rma-0 -+ - -. Add relevant entries to Vault -+ -[source,bash] ----- -parent="clusters/kv/${TENANT_ID}/${CLUSTER_ID}" -instance_1="collector-exoscale-ch-gva-2" -instance_2="collector-cloudscale-rma-0" - -vault kv put "${parent}/billing-collector-cloudservices/${instance_1}" exoscale-key= exoscale-secret= cluster-server= cluster-token= -vault kv put "${parent}/billing-collector-cloudservices/${instance_2}" exoscale-key= exoscale-secret= cluster-server= cluster-token= ----- -+ - -. Compile and push the cluster catalog -. Wait until changes are applied -. Verify that the instances are up and configured correctly diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 6782803..af47c81 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -1,9 +1,7 @@ = billing-collector-cloudservices -Batch job to sync usage data from the Exoscale and Cloudscale metrics APIs to the https://github.com/appuio/appuio-cloud-reporting/[APPUiO Cloud Reporting] database. +Controller to sync usage data from the Exoscale and Cloudscale metrics APIs to Odoo16 Metrics are collected taking into account product (e.g. `object-storage-storage:exoscale`), source (e.g. `exoscale:namespace`), tenant (organization) and date time. -On DBaaS, we also gather information by Plan. That can be seeing in the product. 
For example, in the case of a PostgreSQL database service, product may look like `postgres:exoscale:*:*:hobbyist-2`. - -See the xref:references/parameters.adoc[parameters] reference for further details. +On DBaaS, we also gather information by Plan. That can be seeing in the product. For example, in the case of a PostgreSQL database service, product may look like `appcat-exoscale-pg-hobbyist-2`. diff --git a/docs/modules/ROOT/pages/references/parameters.adoc b/docs/modules/ROOT/pages/references/parameters.adoc deleted file mode 100644 index f777453..0000000 --- a/docs/modules/ROOT/pages/references/parameters.adoc +++ /dev/null @@ -1,219 +0,0 @@ -= Parameters - -Because this component depends on the component https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html[appuio-cloud-reporting], some parameters are taken from that component and are not individually configurable in this component. -In particular: - -* https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_namespace[namespace] -* https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_database[database] - -The following list includes only parameters of this component. -The parent key for all of them is `metrics_collector`. - -See xref:how-tos/installation.adoc[Installation] for a full example. - -== `namespace` - -[horizontal] -type:: string -default:: `${_instance}` - -The namespace in which to deploy this component. - -== `database` - -[horizontal] -type:: dictionary -default:: -+ -[source,yaml] ----- -database: ${appuio_cloud_reporting:database} ----- - -See https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_database[appuio-cloud-reporting docs] for reference. 
- - -== `database_secret` - -[horizontal] -type:: dictionary -default:: -+ -[source,yaml] ----- -database_secret: ${appuio_cloud_reporting:database_secret} ----- - -See https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_database_secret[appuio-cloud-reporting docs] for reference. - -== `database_env` - -[horizontal] -type:: dictionary -default:: -+ -[source,yaml] ----- -database_env: ${appuio_cloud_reporting:database_env} ----- - -See https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_database_env[appuio-cloud-reporting docs] for reference. - - -== `extra_volumes` - -[horizontal] -type:: dictionary -default:: -+ -[source,yaml] ----- -extra_volumes: ${appuio_cloud_reporting:extra_volumes} ----- - -See https://hub.syn.tools/appuio-cloud-reporting/references/parameters.html#_extra_volumes[appuio-cloud-reporting docs] for reference. - - -== `images` - -[horizontal] -type:: dictionary -default:: https://github.com/vshn/billing-collector-cloudservices/blob/master/component/class/defaults.yml[See class/defaults.yml]. - -Dictionary containing the container images used by this component. - -== `exoscale.enabled` - -[horizontal] -type:: bool -default:: `false` - -Set to true if you want to deploy the exosacle metrics collector components. - -=== `exoscale.objectStorage.enabled` - -[horizontal] -type:: bool -default:: `true` - -Enable exoscale object storage cronjob. - -=== `exoscale.objectStorage.schedule` - -[horizontal] -type:: string -default:: `10 10,16,20 * * *` - -The cron schedule at which the object storage metrics collection job is spawned. - -=== `exoscale.dbaas.enabled` - -[horizontal] -type:: bool -default:: `false` - -Whether to add the DBaaS cronjob, too. - -=== `exoscale.dbaas.schedule` - -[horizontal] -type:: string -default:: `*/15 * * * *` - -The cron schedule at which the DBaaS metrics collection job is spawned, if enabled. 
- -=== `secrets.exoscale.credentials.stringData.EXOSCALE_API_KEY` - -[horizontal] -type:: string -default:: Required. - -The Exoscale API key. - -You need to get the token from the https://portal.exoscale.com[Exoscale Console]. -You need to select the correct account (token is limited to one account), choose "IAM" in the menu and generate a new key pair. - -=== `secrets.exoscale.credentials.stringData.EXOSCALE_API_SECRET` - -[horizontal] -type:: string -default:: Required. - -The Exoscale API secret. - -Second part of the Exoscale API credentials. -See EXOSCALE_API_KEY for instructions. - -=== `secrets.exoscale.credentials.stringData.KUBERNETES_SERVER_URL` - -[horizontal] -type:: string -default:: Required. - -The Kubernetes server URL. - -=== `secrets.exoscale.credentials.stringData.KUBERNETES_SERVER_TOKEN` - -[horizontal] -type:: string -default:: Required. - -The token to connect to a Kubernetes cluster. - -The Service Account connected to this token should have `get` and `list` permissions to the following managed resources: - -* `buckets.exoscale.crossplane.io` -* `postgresqls.exoscale.crossplane.io` - -== `cloudscale.enabled` - -[horizontal] -type:: bool -default:: `false` - -Set to true if you want to deploy the cloudscale metrics collector components. - -=== `cloudscale.objectStorage.enabled` - -[horizontal] -type:: bool -default:: `true` - -Enable cloudscale object storage cronjob. - -=== `cloudscale.objectStorage.schedule` - -[horizontal] -type:: string -default:: `10 4,10,16 * * *` - -The cron schedule at which the object storage metrics collection job is spawned. - -== `secrets.cloudscale.credentials.stringData.CLOUDSCALE_API_TOKEN` - -[horizontal] -type:: string -default:: Required. - -The cloudscale API token. - -You need to get the token from the https://control.cloudscale.ch[Cloudscale Control Panel]. -You need to select the correct Project (token is limited to one project), choose "API Tokens" in the menu and generate a new one. 
- -== `secrets.cloudscale.credentials.stringData.KUBERNETES_SERVER_URL` - -[horizontal] -type:: string -default:: Required. - -The Kubernetes server URL. - -== `secrets.cloudscale.credentials.stringData.KUBERNETES_SERVER_TOKEN` - -[horizontal] -type:: string -default:: Required. - -The token to connect to a Kubernetes cluster. - -The Service Account connected to this token should have `get` and `list` permissions to `buckets.cloudscale.crossplane.io` managed resource, and `get` and `list` permissions for namespaces. diff --git a/go.mod b/go.mod index e7fb0f4..bc8ffd3 100644 --- a/go.mod +++ b/go.mod @@ -1,49 +1,49 @@ module github.com/vshn/billing-collector-cloudservices -go 1.19 +go 1.21 require ( github.com/appuio/appuio-cloud-reporting v0.10.0 + github.com/appuio/control-api v0.31.0 github.com/cloudscale-ch/cloudscale-go-sdk/v2 v2.1.0 github.com/exoscale/egoscale v0.90.1 - github.com/go-logr/logr v1.2.4 - github.com/go-logr/zapr v1.2.4 - github.com/google/uuid v1.3.0 - github.com/jackc/pgx/v4 v4.18.1 - github.com/jmoiron/sqlx v1.3.5 - github.com/stretchr/testify v1.8.2 + github.com/go-logr/logr v1.3.0 + github.com/go-logr/zapr v1.3.0 + github.com/stretchr/testify v1.8.4 github.com/urfave/cli/v2 v2.24.4 github.com/vshn/provider-cloudscale v0.5.0 github.com/vshn/provider-exoscale v0.8.1 - go.uber.org/zap v1.24.0 + go.uber.org/zap v1.26.0 + golang.org/x/oauth2 v0.12.0 gopkg.in/dnaeon/go-vcr.v3 v3.1.2 - k8s.io/api v0.26.1 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 - sigs.k8s.io/controller-runtime v0.14.1 + k8s.io/api v0.29.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 + sigs.k8s.io/controller-runtime v0.16.3 ) require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/benbjohnson/clock v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect 
github.com/crossplane/crossplane-runtime v0.18.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deepmap/oapi-codegen v1.12.4 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.2.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect @@ -55,50 +55,49 @@ require ( github.com/jackc/pgproto3/v2 v2.3.2 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.1 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/pretty v0.3.0 // indirect github.com/lopezator/migrator v0.3.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.6.1 // indirect - github.com/onsi/gomega v1.24.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.40.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.6.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.3.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect 
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.1 // indirect - k8s.io/component-base v0.26.1 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20230118215034-64b6bb138190 // indirect - k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect + k8s.io/apiserver v0.29.0 // indirect + k8s.io/component-base v0.29.0 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect + sigs.k8s.io/apiserver-runtime v1.1.2-0.20231017233931-4d54d00b524a // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 7226c8b..85900f6 100644 --- a/go.sum +++ b/go.sum @@ -38,24 +38,34 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= 
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/appuio/appuio-cloud-reporting v0.10.0 h1:4n9XVlCr5oHfnrg8F8KHueAO+/+Qh+Hb1GIzyTgb8WE= github.com/appuio/appuio-cloud-reporting v0.10.0/go.mod h1:3UppRODpaAHvfJwMEnkYENNXkVxWCptBMbk1Jw3qHww= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/appuio/control-api v0.31.0 h1:zgWtVw1uhQbnkbrqg+0g4yFrcn/6+yLcfgjknZhhb/Q= +github.com/appuio/control-api v0.31.0/go.mod h1:Nvy0YcOw9PuQ+uFMWqWH0j3t47ckkPE+IeR8Fz/oAjU= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -67,11 +77,15 @@ github.com/cloudscale-ch/cloudscale-go-sdk/v2 v2.1.0/go.mod h1:0oHKsCRkSUkAnnCBk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-semver v0.3.1 
h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -83,38 +97,38 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s= github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exoscale/egoscale v0.90.1 h1:G/Uyz3Yjdvo3H2oOFS5DhnzEZARLh77IhN58xHHFOpI= github.com/exoscale/egoscale v0.90.1/go.mod h1:NDhQbdGNKwnLVC2YGTB6ds9WIPw+V5ckvEEV8ho7pFE= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= 
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -124,6 +138,8 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -156,12 +172,14 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic 
v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -172,8 +190,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -190,14 +209,20 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -211,6 +236,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -275,10 +302,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -287,8 +313,9 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lopezator/migrator v0.3.1 h1:ZFPT6aC7+nGWkqhleynABZ6ftycSf6hmHHLOaryq1Og= github.com/lopezator/migrator v0.3.1/go.mod h1:X+lHDMZ9Ci3/KdbypJcQYFFwipVrJsX4fRCQ4QLauYk= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -297,6 +324,7 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -304,10 +332,11 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty 
v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU= +github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -315,29 +344,28 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= -github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= -github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= 
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.40.0 h1:Afz7EVRqGg2Mqqf4JuF9vdvp1pi220m55Pi9T2JnO4Q= -github.com/prometheus/common v0.40.0/go.mod h1:L65ZJPSmfn/UBWLQIHV7dBrKFidB/wPlF1y5TlSt9OE= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -349,12 +377,14 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= 
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -370,54 +400,70 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU= github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/vshn/provider-cloudscale v0.5.0 h1:C5Cv5MZXLaC4qOQ0B6WTBGiyndQMiDMbhTelF7cJuzw= github.com/vshn/provider-cloudscale v0.5.0/go.mod h1:TaiT6RLZoQwsHeHNLyLmWBPEJTlBRRcHISeRkkt0WKc= github.com/vshn/provider-exoscale v0.8.1 h1:jYAADQl1ngTeUajKtbqicmRrBHVr1gmf8WkcBkfRtIo= github.com/vshn/provider-exoscale v0.8.1/go.mod h1:CPD4gmpxDOnPAoJWxNRyF5mX+ASbhyE/B0AO8B/kqz0= 
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c h1:/RwRVN9EdXAVtdHxP7Ndn/tfmM9/goiwU0QTnLBgS4w= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= 
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -431,8 +477,9 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -443,6 +490,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -466,7 +515,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -500,11 +548,10 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190517181255-950ef44c6e07/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -515,8 +562,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -527,8 +574,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -569,38 +617,35 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -655,16 +700,17 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -715,7 +761,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -729,7 +774,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -743,12 +793,11 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -761,12 +810,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/dnaeon/go-vcr.v3 v3.1.2 h1:F1smfXBqQqwpVifDfUBQG6zzaGjzT+EnVZakrOdr5wA= @@ -775,13 +822,13 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -792,30 +839,38 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= -k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= -k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230118215034-64b6bb138190 h1:5MAqxJfshQZ9NdSNGAn7CJ9vuBxAiTaqn3B4pfqD+PE= -k8s.io/kube-openapi v0.0.0-20230118215034-64b6bb138190/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= -k8s.io/utils 
v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= +k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= +k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= +k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.1 h1:vThDes9pzg0Y+UbCPY3Wj34CGIYPgdmspPm2GIpxpzM= -sigs.k8s.io/controller-runtime v0.14.1/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/apiserver-runtime v1.1.2-0.20231017233931-4d54d00b524a h1:gNAaOi/JlTDAzweUgybSazp3DQh30TRrAmb1srdEdIg= +sigs.k8s.io/apiserver-runtime v1.1.2-0.20231017233931-4d54d00b524a/go.mod h1:4b9SbLg7Sf5dlYK6gD3lIn3tHNTpeKUUjLhOLqiN8FE= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/main.go b/main.go index 57425bf..df828ed 100644 --- a/main.go +++ b/main.go @@ -68,6 +68,32 @@ func newApp() 
(context.Context, context.CancelFunc, *cli.App) { DefaultText: "console", Destination: &logFormat, }, + &cli.IntFlag{ + Name: "collectInterval", + Usage: "Interval in which the exporter checks the cloud resources", + Value: 10, + }, + &cli.IntFlag{ + Name: "billingHour", + Usage: "After which hour every day the objectstorage collector should start", + Value: 6, + Action: func(c *cli.Context, i int) error { + if i > 23 || i < 0 { + return fmt.Errorf("invalid billingHour value, needs to be between 0 and 23") + } + return nil + }, + }, + &cli.StringFlag{ + Name: "organizationOverride", + Usage: "If the collector is collecting the metrics for an APPUiO managed instance. It needs to set the name of the customer.", + Value: "", + }, + &cli.StringFlag{ + Name: "bind", + Usage: "Golang bind string. Will be used for the exporter", + Value: ":9123", + }, }, Before: func(c *cli.Context) error { logger, err := log.NewLogger(appName, version, logLevel, logFormat) @@ -75,12 +101,6 @@ func newApp() (context.Context, context.CancelFunc, *cli.App) { return fmt.Errorf("before: %w", err) } c.Context = log.NewLoggingContext(c.Context, logger) - return nil - }, - Action: func(c *cli.Context) error { - if true { - return cli.ShowAppHelp(c) - } log.Logger(c.Context).WithValues( "date", date, "commit", commit, @@ -92,6 +112,13 @@ func newApp() (context.Context, context.CancelFunc, *cli.App) { ).Info("Starting up " + appName) return nil }, + Action: func(c *cli.Context) error { + if true { + return cli.ShowAppHelp(c) + } + + return nil + }, Commands: []*cli.Command{ cmd.ExoscaleCmds(), cmd.CloudscaleCmds(), diff --git a/pkg/cloudscale/accumulate.go b/pkg/cloudscale/accumulate.go deleted file mode 100644 index 6df2ce6..0000000 --- a/pkg/cloudscale/accumulate.go +++ /dev/null @@ -1,160 +0,0 @@ -package cloudscale - -import ( - "context" - "fmt" - "time" - - "github.com/cloudscale-ch/cloudscale-go-sdk/v2" - cloudscalev1 "github.com/vshn/provider-cloudscale/apis/cloudscale/v1" - corev1 
"k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - organizationLabel = "appuio.io/organization" - namespaceLabel = "crossplane.io/claim-namespace" -) - -// AccumulateKey represents one data point ("fact") in the billing database. -// The actual value for the data point is not present in this type, as this type is just a map key, and the corresponding value is stored as a map value. -type AccumulateKey struct { - Query string - Zone string - Tenant string - Namespace string - Start time.Time -} - -// String returns the full "source" string as used by the appuio-cloud-reporting -func (k AccumulateKey) String() string { - return k.Query + ":" + k.Zone + ":" + k.Tenant + ":" + k.Namespace -} - -// MarshalText implements encoding.TextMarshaler to be able to e.g. log the map with this key type. -func (k AccumulateKey) MarshalText() ([]byte, error) { - return []byte(k.String()), nil -} - -/* -accumulateBucketMetrics gets all the bucket metrics from cloudscale and puts them into a map. The map key is the "AccumulateKey", -and the value is the raw value of the data returned by cloudscale (e.g. bytes, requests). In order to construct the -correct AccumulateKey, this function needs to fetch the namespace and bucket custom resources, because that's where the tenant is stored. -This method is "accumulating" data because it collects data from possibly multiple ObjectsUsers under the same -AccumulateKey. This is because the billing system can't handle multiple ObjectsUsers per namespace. 
-*/ -func accumulateBucketMetrics(ctx context.Context, date time.Time, cloudscaleClient *cloudscale.Client, k8sclient client.Client) (map[AccumulateKey]uint64, error) { - logger := ctrl.LoggerFrom(ctx) - - logger.V(1).Info("fetching bucket metrics from cloudscale", "date", date) - - bucketMetricsRequest := cloudscale.BucketMetricsRequest{Start: date, End: date} - bucketMetrics, err := cloudscaleClient.Metrics.GetBucketMetrics(ctx, &bucketMetricsRequest) - if err != nil { - return nil, err - } - - logger.V(1).Info("fetching namespaces") - - nsTenants, err := fetchNamespaces(ctx, k8sclient) - if err != nil { - return nil, err - } - - logger.V(1).Info("fetching buckets") - - buckets, err := fetchBuckets(ctx, k8sclient) - if err != nil { - return nil, err - } - - accumulated := make(map[AccumulateKey]uint64) - - for _, bucketMetricsData := range bucketMetrics.Data { - name := bucketMetricsData.Subject.BucketName - logger := logger.WithValues("bucket", name) - ns, ok := buckets[name] - if !ok { - logger.Info("unable to sync bucket, ObjectBucket not found") - continue - } - tenant, ok := nsTenants[ns] - if !ok { - logger.Info("unable to sync bucket, no tenant mapping available for namespace", "namespace", ns) - continue - } - err = accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, tenant, ns) - if err != nil { - logger.Error(err, "unable to sync bucket", "namespace", ns) - continue - } - logger.V(1).Info("accumulated bucket metrics", "namespace", ns, "tenant", tenant, "accumulated", accumulated) - } - - return accumulated, nil -} - -func fetchBuckets(ctx context.Context, k8sclient client.Client) (map[string]string, error) { - buckets := &cloudscalev1.BucketList{} - if err := k8sclient.List(ctx, buckets, client.HasLabels{namespaceLabel}); err != nil { - return nil, fmt.Errorf("bucket list: %w", err) - } - - bucketNS := map[string]string{} - for _, b := range buckets.Items { - bucketNS[b.GetBucketName()] = b.Labels[namespaceLabel] - } - return bucketNS, 
nil -} - -func fetchNamespaces(ctx context.Context, k8sclient client.Client) (map[string]string, error) { - namespaces := &corev1.NamespaceList{} - if err := k8sclient.List(ctx, namespaces, client.HasLabels{organizationLabel}); err != nil { - return nil, fmt.Errorf("namespace list: %w", err) - } - - nsTenants := map[string]string{} - for _, ns := range namespaces.Items { - nsTenants[ns.Name] = ns.Labels[organizationLabel] - } - return nsTenants, nil -} - -func accumulateBucketMetricsForObjectsUser(accumulated map[AccumulateKey]uint64, bucketMetricsData cloudscale.BucketMetricsData, tenant, namespace string) error { - if len(bucketMetricsData.TimeSeries) != 1 { - return fmt.Errorf("there must be exactly one metrics data point, found %d", len(bucketMetricsData.TimeSeries)) - } - - // For now all the buckets have the same zone. This may change in the future if Cloudscale decides to have different - // prices for different locations. - zone := sourceZones[0] - - sourceStorage := AccumulateKey{ - Query: sourceQueryStorage, - Zone: zone, - Tenant: tenant, - Namespace: namespace, - Start: bucketMetricsData.TimeSeries[0].Start, - } - sourceTrafficOut := AccumulateKey{ - Query: sourceQueryTrafficOut, - Zone: zone, - Tenant: tenant, - Namespace: namespace, - Start: bucketMetricsData.TimeSeries[0].Start, - } - sourceRequests := AccumulateKey{ - Query: sourceQueryRequests, - Zone: zone, - Tenant: tenant, - Namespace: namespace, - Start: bucketMetricsData.TimeSeries[0].Start, - } - - accumulated[sourceStorage] += uint64(bucketMetricsData.TimeSeries[0].Usage.StorageBytes) - accumulated[sourceTrafficOut] += uint64(bucketMetricsData.TimeSeries[0].Usage.SentBytes) - accumulated[sourceRequests] += uint64(bucketMetricsData.TimeSeries[0].Usage.Requests) - - return nil -} diff --git a/pkg/cloudscale/accumulate_test.go b/pkg/cloudscale/accumulate_test.go deleted file mode 100644 index 466e35c..0000000 --- a/pkg/cloudscale/accumulate_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package 
cloudscale - -import ( - "fmt" - "testing" - "time" - - "github.com/cloudscale-ch/cloudscale-go-sdk/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// assertEqualfUint64 implements the functionality of assert.Equalf for uint64, because assert.Equalf cannot print uint64 correctly. -// See https://github.com/stretchr/testify/issues/400 -func assertEqualfUint64(t *testing.T, expected uint64, actual uint64, msg string, args ...interface{}) bool { - if expected != actual { - return assert.Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %d\n"+ - "actual : %d", expected, actual)) - } - return true -} - -func TestAccumulateBucketMetricsForObjectsUser(t *testing.T) { - zone := "cloudscale" - organization := "inity" - namespace := "testnamespace" - - location, err := time.LoadLocation("Europe/Zurich") - assert.NoError(t, err) - - now := time.Now().In(location) - date := time.Date(now.Year(), now.Month(), now.Day()-1, 0, 0, 0, 0, now.Location()) - - // build input data structure - bucketMetricsInterval := []cloudscale.BucketMetricsInterval{ - { - Start: date, - End: date, - Usage: cloudscale.BucketMetricsIntervalUsage{ - Requests: 5, - StorageBytes: 1000000, - SentBytes: 2000000, - }, - }, - } - bucketMetricsData := cloudscale.BucketMetricsData{ - TimeSeries: bucketMetricsInterval, - } - - accumulated := make(map[AccumulateKey]uint64) - assert.NoError(t, accumulateBucketMetricsForObjectsUser(accumulated, bucketMetricsData, organization, namespace)) - - require.Len(t, accumulated, 3, "incorrect amount of values 'accumulated'") - - key := AccumulateKey{ - Zone: zone, - Tenant: organization, - Namespace: namespace, - Start: date, - } - - key.Query = "appcat_object-storage-requests" - assertEqualfUint64(t, uint64(5), accumulated[key], "incorrect value in %s", key) - - key.Query = "appcat_object-storage-storage" - assertEqualfUint64(t, uint64(1000000), accumulated[key], "incorrect value in %s", key) - - key.Query = 
"appcat_object-storage-traffic-out" - assertEqualfUint64(t, uint64(2000000), accumulated[key], "incorrect value in %s", key) -} diff --git a/pkg/cloudscale/fixtures.go b/pkg/cloudscale/fixtures.go index f08ddc3..86a9b0a 100644 --- a/pkg/cloudscale/fixtures.go +++ b/pkg/cloudscale/fixtures.go @@ -1,17 +1,13 @@ package cloudscale -import ( - "database/sql" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" -) +import "github.com/vshn/billing-collector-cloudservices/pkg/odoo" const ( // source format: 'query:zone:tenant:namespace' or 'query:zone:tenant:namespace:class' // We do not have real (prometheus) queries here, just random hardcoded strings. - sourceQueryStorage = "appcat_object-storage-storage" - sourceQueryTrafficOut = "appcat_object-storage-traffic-out" - sourceQueryRequests = "appcat_object-storage-requests" + productIdStorage = "appcat-cloudscale-object-storage-storage" + productIdTrafficOut = "appcat-cloudscale-object-storage-traffic-out" + productIdQueryRequests = "appcat_object-storage-requests" ) var ( @@ -20,79 +16,8 @@ var ( sourceZones = []string{"cloudscale"} ) -var ( - ensureProducts = []*db.Product{ - { - Source: sourceQueryStorage + ":" + sourceZones[0], - Target: sql.NullString{String: "1401", Valid: true}, - Amount: 0.0033, // this is per DAY, equals 0.099 per GB per month - Unit: "GBDay", // SI GB according to cloudscale - During: db.InfiniteRange(), - }, - { - Source: sourceQueryTrafficOut + ":" + sourceZones[0], - Target: sql.NullString{String: "1403", Valid: true}, - Amount: 0.022, - Unit: "GB", // SI GB according to cloudscale - During: db.InfiniteRange(), - }, - { - Source: sourceQueryRequests + ":" + sourceZones[0], - Target: sql.NullString{String: "1405", Valid: true}, - Amount: 0.0055, - Unit: "KReq", - During: db.InfiniteRange(), - }, - } -) -var ( - ensureDiscounts = []*db.Discount{ - { - Source: sourceQueryStorage, - Discount: 0, - During: db.InfiniteRange(), - }, - { - Source: sourceQueryTrafficOut, - Discount: 0, - During: 
db.InfiniteRange(), - }, - { - Source: sourceQueryRequests, - Discount: 0, - During: db.InfiniteRange(), - }, - } -) - var units = map[string]string{ - sourceQueryStorage: "GBDay", - sourceQueryTrafficOut: "GB", - sourceQueryRequests: "KReq", + productIdStorage: odoo.GBDay, + productIdTrafficOut: odoo.GB, + productIdQueryRequests: odoo.KReq, } - -var ( - ensureQueries = []*db.Query{ - { - Name: sourceQueryStorage + ":" + sourceZones[0], - Description: "Object Storage - Storage (cloudscale.ch)", - Query: "", - Unit: units[sourceQueryStorage], - During: db.InfiniteRange(), - }, - { - Name: sourceQueryTrafficOut + ":" + sourceZones[0], - Description: "Object Storage - Traffic Out (cloudscale.ch)", - Query: "", - Unit: units[sourceQueryTrafficOut], - During: db.InfiniteRange(), - }, - { - Name: sourceQueryRequests + ":" + sourceZones[0], - Description: "Object Storage - Requests (cloudscale.ch)", - Query: "", - Unit: units[sourceQueryRequests], - During: db.InfiniteRange(), - }, - } -) diff --git a/pkg/cloudscale/objecstorage_integration_test.go b/pkg/cloudscale/objecstorage_integration_test.go deleted file mode 100644 index a63f41e..0000000 --- a/pkg/cloudscale/objecstorage_integration_test.go +++ /dev/null @@ -1,199 +0,0 @@ -//go:build integration - -package cloudscale - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/cloudscale-ch/cloudscale-go-sdk/v2" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/suite" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" - "github.com/vshn/billing-collector-cloudservices/pkg/test" - cloudscalev1 "github.com/vshn/provider-cloudscale/apis/cloudscale/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type ObjectStorageTestSuite struct { - test.Suite - billingDate time.Time -} - -func (ts *ObjectStorageTestSuite) SetupSuite() { - cloudscaleCRDsPath := 
os.Getenv("CLOUDSCALE_CRDS_PATH") - ts.Require().NotZero(cloudscaleCRDsPath, "missing env variable CLOUDSCALE_CRDS_PATH") - - ts.SetupEnv([]string{cloudscaleCRDsPath}) - - ts.RegisterScheme(cloudscalev1.SchemeBuilder.AddToScheme) -} - -// TestMetrics sets up a couple of buckets and associated namespaces with organizations set. -// The cloudscale client is set up with an HTTP replay recorder (go-vcr) which looks into testdata/ for recorded -// HTTP responses. -// For simplicity reasons, the recorder only uses URL and method for matching recorded responses. The upside -// of this is it doesn't matter when we execute the tests since the date used to fetch metrics doesn't matter for matching. -// Downside of course is it doesn't do any validation related to the date matching but that's not the main thing to test here. -func (ts *ObjectStorageTestSuite) TestMetrics() { - assert := ts.Assert() - ctx := ts.Context - - o, cancel := ts.setupObjectStorage() - defer cancel() - - expectedQuantities := map[AccumulateKey]float64{ - AccumulateKey{ - Query: sourceQueryStorage, - Zone: sourceZones[0], - Tenant: "example-company", - Namespace: "example-project", - Start: ts.billingDate, - }: 1000.000004096, - AccumulateKey{ - Query: sourceQueryRequests, - Zone: sourceZones[0], - Tenant: "example-company", - Namespace: "example-project", - Start: ts.billingDate, - }: 100.001, - AccumulateKey{ - Query: sourceQueryTrafficOut, - Zone: sourceZones[0], - Tenant: "example-company", - Namespace: "example-project", - Start: ts.billingDate, - }: 50.0, - AccumulateKey{ - Query: sourceQueryStorage, - Zone: sourceZones[0], - Tenant: "big-corporation", - Namespace: "next-big-thing", - Start: ts.billingDate, - }: 0, - AccumulateKey{ - Query: sourceQueryRequests, - Zone: sourceZones[0], - Tenant: "big-corporation", - Namespace: "next-big-thing", - Start: ts.billingDate, - }: 0.001, - AccumulateKey{ - Query: sourceQueryTrafficOut, - Zone: sourceZones[0], - Tenant: "big-corporation", - Namespace: 
"next-big-thing", - Start: ts.billingDate, - }: 0, - } - nameNsMap := map[string]string{ - "example-project-a": "example-project", - "example-project-b": "example-project", - "next-big-thing-a": "next-big-thing", - } - nsTenantMap := map[string]string{ - "example-project": "example-company", - "next-big-thing": "big-corporation", - } - ts.ensureBuckets(nameNsMap) - - createdNs := make(map[string]bool) - for _, ns := range nameNsMap { - if _, ok := createdNs[ns]; !ok { - ts.EnsureNS(ns, map[string]string{organizationLabel: nsTenantMap[ns]}) - createdNs[ns] = true - } - } - - assert.NoError(o.Execute(ctx)) - - store, err := reporting.NewStore(ts.DatabaseURL, ts.Logger) - assert.NoError(err) - defer func() { - assert.NoError(store.Close()) - }() - - // a bit pointless to use a transaction for checking the results but I wanted to avoid exposing something - // which should not be used outside test code. - assert.NoError(store.WithTransaction(ctx, func(tx *sqlx.Tx) error { - dt, err := reporting.GetDateTime(ctx, tx, ts.billingDate) - if !assert.NoError(err) || !assert.NotZero(dt) { - return fmt.Errorf("no dateTime found(%q): %w (nil? 
%v)", ts.billingDate, err, dt) - } - - for key, expectedQuantity := range expectedQuantities { - fact, err := ts.getFact(ctx, tx, ts.billingDate, dt, key) - assert.NoError(err, key) - if expectedQuantity == 0 { - assert.Nil(fact, "fact found but expectedQuantity was zero") - } else { - assert.NotNil(fact, key) - assert.Equal(expectedQuantity, fact.Quantity, key) - } - } - return nil - })) -} - -func (ts *ObjectStorageTestSuite) getFact(ctx context.Context, tx *sqlx.Tx, date time.Time, dt *db.DateTime, src AccumulateKey) (*db.Fact, error) { - record := reporting.Record{ - TenantSource: src.Tenant, - CategorySource: src.Zone + ":" + src.Namespace, - BillingDate: date, - ProductSource: src.String(), - DiscountSource: src.String(), - QueryName: src.Query + ":" + src.Zone, - } - return test.FactByRecord(ctx, tx, dt, record) -} - -func (ts *ObjectStorageTestSuite) ensureBuckets(nameNsMap map[string]string) { - resources := make([]client.Object, 0) - for name, ns := range nameNsMap { - resources = append(resources, &cloudscalev1.Bucket{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{namespaceLabel: ns}, - }, - Spec: cloudscalev1.BucketSpec{ - ForProvider: cloudscalev1.BucketParameters{BucketName: name}, - }, - }) - } - ts.EnsureResources(resources...) -} - -func (ts *ObjectStorageTestSuite) setupObjectStorage() (*ObjectStorage, func()) { - assert := ts.Assert() - httpClient, cancel, err := test.RequestRecorder(ts.T(), "testdata/cloudscale/"+ts.T().Name()) - assert.NoError(err) - - c := cloudscale.NewClient(httpClient) - // required to be set when recording new response. 
- if apiToken := os.Getenv("CLOUDSCALE_API_TOKEN"); apiToken != "" { - c.AuthToken = apiToken - ts.T().Log("API token set") - } else { - ts.T().Log("no API token provided") - } - - location, err := time.LoadLocation("Europe/Zurich") - assert.NoError(err) - - ts.billingDate = time.Date(2023, 1, 11, 0, 0, 0, 0, location) - o, err := NewObjectStorage(c, ts.Client, ts.DatabaseURL, ts.billingDate) - assert.NoError(err) - - return o, cancel -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestObjectStorageTestSuite(t *testing.T) { - suite.Run(t, new(ObjectStorageTestSuite)) -} diff --git a/pkg/cloudscale/objectstorage.go b/pkg/cloudscale/objectstorage.go index c235ebd..214c8d8 100644 --- a/pkg/cloudscale/objectstorage.go +++ b/pkg/cloudscale/objectstorage.go @@ -6,95 +6,195 @@ import ( "fmt" "time" + "github.com/vshn/billing-collector-cloudservices/pkg/controlAPI" + "github.com/vshn/billing-collector-cloudservices/pkg/kubernetes" + "github.com/vshn/billing-collector-cloudservices/pkg/log" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" + cloudscalev1 "github.com/vshn/provider-cloudscale/apis/cloudscale/v1" + "github.com/cloudscale-ch/cloudscale-go-sdk/v2" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + k8s "sigs.k8s.io/controller-runtime/pkg/client" ) +type BucketDetail struct { + Namespace string + Zone string +} + type ObjectStorage struct { - client *cloudscale.Client - k8sClient client.Client - date time.Time - databaseURL string + client *cloudscale.Client + k8sClient k8s.Client + controlApiClient k8s.Client + salesOrder string + clusterId string + uomMapping map[string]string } -func NewObjectStorage(client *cloudscale.Client, k8sClient client.Client, databaseURL string, billingDate time.Time) (*ObjectStorage, error) { +const ( + namespaceLabel = 
"crossplane.io/claim-namespace" +) + +func NewObjectStorage(client *cloudscale.Client, k8sClient k8s.Client, controlApiClient k8s.Client, salesOrder, clusterId string, uomMapping map[string]string) (*ObjectStorage, error) { return &ObjectStorage{ - client: client, - k8sClient: k8sClient, - date: billingDate, - databaseURL: databaseURL, + client: client, + k8sClient: k8sClient, + controlApiClient: controlApiClient, + salesOrder: salesOrder, + clusterId: clusterId, + uomMapping: uomMapping, }, nil } -func (obj *ObjectStorage) Execute(ctx context.Context) error { - logger := ctrl.LoggerFrom(ctx) - s, err := reporting.NewStore(obj.databaseURL, logger.WithName("reporting-store")) - if err != nil { - return fmt.Errorf("reporting.NewStore: %w", err) - } - defer func() { - if err := s.Close(); err != nil { - logger.Error(err, "unable to close") - } - }() +func (o *ObjectStorage) GetMetrics(ctx context.Context, billingDate time.Time) ([]odoo.OdooMeteredBillingRecord, error) { + logger := log.Logger(ctx) - if err := obj.initialize(ctx, s); err != nil { - return err - } - accumulated, err := obj.accumulate(ctx) + logger.V(1).Info("fetching bucket metrics from cloudscale", "date", billingDate) + + bucketMetricsRequest := cloudscale.BucketMetricsRequest{Start: billingDate, End: billingDate} + bucketMetrics, err := o.client.Metrics.GetBucketMetrics(ctx, &bucketMetricsRequest) if err != nil { - return err + return nil, err } - return obj.save(ctx, s, accumulated) -} -func (obj *ObjectStorage) initialize(ctx context.Context, s *reporting.Store) error { - logger := ctrl.LoggerFrom(ctx) - if err := s.Initialize(ctx, ensureProducts, ensureDiscounts, ensureQueries); err != nil { - return fmt.Errorf("initialize: %w", err) + // Fetch organisations in case salesOrder is missing + var nsTenants map[string]string + if o.salesOrder == "" { + logger.V(1).Info("Sales order id is missing, fetching namespaces to get the associated org id") + nsTenants, err = 
kubernetes.FetchNamespaceWithOrganizationMap(ctx, o.k8sClient) + if err != nil { + return nil, err + } } - logger.Info("initialized reporting db") - return nil -} -func (obj *ObjectStorage) accumulate(ctx context.Context) (map[AccumulateKey]uint64, error) { - return accumulateBucketMetrics(ctx, obj.date, obj.client, obj.k8sClient) -} + logger.V(1).Info("fetching buckets") -func (obj *ObjectStorage) save(ctx context.Context, s *reporting.Store, accumulated map[AccumulateKey]uint64) error { - logger := ctrl.LoggerFrom(ctx) + buckets, err := fetchBuckets(ctx, o.k8sClient) + if err != nil { + return nil, err + } - for source, value := range accumulated { - logger := logger.WithValues("source", source) - if value == 0 { - logger.V(1).Info("skipping zero valued entry") + allRecords := make([]odoo.OdooMeteredBillingRecord, 0) + for _, bucketMetricsData := range bucketMetrics.Data { + name := bucketMetricsData.Subject.BucketName + logger = logger.WithValues("bucket", name) + bd, ok := buckets[name] + if !ok { + logger.Info("unable to sync bucket, ObjectBucket not found") continue } - logger.Info("accumulating source") - - quantity, err := convertUnit(units[source.Query], value) - if err != nil { - logger.Error(err, "convertUnit failed, skip entry", "unit", units[source.Query], value) - continue + appuioManaged := true + if o.salesOrder == "" { + appuioManaged = false + o.salesOrder, err = controlAPI.GetSalesOrder(ctx, o.controlApiClient, nsTenants[bd.Namespace]) + if err != nil { + logger.Error(err, "unable to sync bucket", "namespace", bd.Namespace) + continue + } } - - err = s.WriteRecord(ctx, reporting.Record{ - TenantSource: source.Tenant, - CategorySource: source.Zone + ":" + source.Namespace, - BillingDate: source.Start, - ProductSource: source.String(), - DiscountSource: source.String(), - QueryName: source.Query + ":" + source.Zone, - Value: quantity, - }) + records, err := o.createOdooRecord(bucketMetricsData, bd, appuioManaged) if err != nil { - logger.Error(err, 
"writeRecord failed, skip entry") + logger.Error(err, "unable to create Odoo Record", "namespace", bd.Namespace) continue } + allRecords = append(allRecords, records...) + logger.V(1).Info("Created Odoo records", "namespace", bd.Namespace, "records", records) + } + + return allRecords, nil +} + +func (o *ObjectStorage) createOdooRecord(bucketMetricsData cloudscale.BucketMetricsData, b BucketDetail, appuioManaged bool) ([]odoo.OdooMeteredBillingRecord, error) { + if len(bucketMetricsData.TimeSeries) != 1 { + return nil, fmt.Errorf("there must be exactly one metrics data point, found %d", len(bucketMetricsData.TimeSeries)) + } + + storageBytesValue, err := convertUnit(units[productIdStorage], uint64(bucketMetricsData.TimeSeries[0].Usage.StorageBytes)) + if err != nil { + return nil, err + } + trafficOutValue, err := convertUnit(units[productIdTrafficOut], uint64(bucketMetricsData.TimeSeries[0].Usage.SentBytes)) + if err != nil { + return nil, err + } + queryRequestsValue, err := convertUnit(units[productIdQueryRequests], uint64(bucketMetricsData.TimeSeries[0].Usage.SentBytes)) + if err != nil { + return nil, err + } + + itemGroup := "" + if appuioManaged { + itemGroup = fmt.Sprintf("APPUiO Managed - Zone: %s / Namespace: %s", o.clusterId, b.Namespace) + } else { + itemGroup = fmt.Sprintf("APPUiO Cloud - Zone: %s / Namespace: %s", o.clusterId, b.Namespace) } + instanceId := fmt.Sprintf("%s/%s", b.Zone, bucketMetricsData.Subject.BucketName) + + return []odoo.OdooMeteredBillingRecord{ + { + ProductID: productIdStorage, + InstanceID: instanceId, + ItemDescription: "AppCat Cloudscale ObjectStorage", + ItemGroupDescription: itemGroup, + SalesOrder: o.salesOrder, + UnitID: o.uomMapping[units[productIdStorage]], + ConsumedUnits: storageBytesValue, + TimeRange: odoo.TimeRange{ + From: bucketMetricsData.TimeSeries[0].Start, + To: bucketMetricsData.TimeSeries[0].End, + }, + }, + { + ProductID: productIdTrafficOut, + InstanceID: instanceId, + ItemDescription: "AppCat Cloudscale 
ObjectStorage", + ItemGroupDescription: itemGroup, + SalesOrder: o.salesOrder, + UnitID: o.uomMapping[units[productIdTrafficOut]], + ConsumedUnits: trafficOutValue, + TimeRange: odoo.TimeRange{ + From: bucketMetricsData.TimeSeries[0].Start, + To: bucketMetricsData.TimeSeries[0].End, + }, + }, + { + ProductID: productIdQueryRequests, + InstanceID: instanceId, + ItemDescription: "AppCat Cloudscale ObjectStorage", + ItemGroupDescription: itemGroup, + SalesOrder: o.salesOrder, + UnitID: o.uomMapping[units[productIdQueryRequests]], + ConsumedUnits: queryRequestsValue, + TimeRange: odoo.TimeRange{ + From: bucketMetricsData.TimeSeries[0].Start, + To: bucketMetricsData.TimeSeries[0].End, + }, + }, + }, nil +} + +func fetchBuckets(ctx context.Context, k8sclient client.Client) (map[string]BucketDetail, error) { + buckets := &cloudscalev1.BucketList{} + if err := k8sclient.List(ctx, buckets, client.HasLabels{namespaceLabel}); err != nil { + return nil, fmt.Errorf("bucket list: %w", err) + } + + bucketDetails := map[string]BucketDetail{} + for _, b := range buckets.Items { + var bd BucketDetail + bd.Namespace = b.Labels[namespaceLabel] + bd.Zone = b.Spec.ForProvider.Region + bucketDetails[b.GetBucketName()] = bd + + } + return bucketDetails, nil +} + +func CheckUnitExistence(mapping map[string]string) error { + if mapping[odoo.GB] == "" || mapping[odoo.GBDay] == "" || mapping[odoo.KReq] == "" { + return fmt.Errorf("missing UOM mapping %s, %s or %s", odoo.GB, odoo.GBDay, odoo.KReq) + } return nil } diff --git a/pkg/cmd/cloudscale.go b/pkg/cmd/cloudscale.go index e05112a..56c2eb7 100644 --- a/pkg/cmd/cloudscale.go +++ b/pkg/cmd/cloudscale.go @@ -3,105 +3,155 @@ package cmd import ( "fmt" "net/http" + "os" + "sync" "time" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" + "github.com/cloudscale-ch/cloudscale-go-sdk/v2" "github.com/urfave/cli/v2" cs "github.com/vshn/billing-collector-cloudservices/pkg/cloudscale" 
"github.com/vshn/billing-collector-cloudservices/pkg/kubernetes" "github.com/vshn/billing-collector-cloudservices/pkg/log" - ctrl "sigs.k8s.io/controller-runtime" ) +const defaultTextForRequiredFlags = "" +const defaultTextForOptionalFlags = "" + func CloudscaleCmds() *cli.Command { var ( - apiToken string - dbURL string - kubernetesServerToken string - kubernetesServerURL string - kubeconfig string - days int + apiToken string + kubeconfig string + controlApiUrl string + controlApiToken string + days int + collectInterval int + billingHour int + odooURL string + odooOauthTokenURL string + odooClientId string + odooClientSecret string + salesOrder string + clusterId string + uom string ) return &cli.Command{ Name: "cloudscale", Usage: "Collect metrics from cloudscale", Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "cloudscale-api-token", - EnvVars: []string{"CLOUDSCALE_API_TOKEN"}, - Required: true, - Usage: "API token for cloudscale", - Destination: &apiToken, - }, - &cli.StringFlag{ - Name: "database-url", - EnvVars: []string{"ACR_DB_URL"}, - Required: true, - Usage: "A PostgreSQL database URL where to save relevant metrics", - Destination: &dbURL, - }, - &cli.StringFlag{ - Name: "kubernetes-server-url", - EnvVars: []string{"KUBERNETES_SERVER_URL"}, - Required: true, - Usage: "A Kubernetes server URL from where to get the data from", - Destination: &kubernetesServerURL, - }, - &cli.StringFlag{ - Name: "kubernetes-server-token", - EnvVars: []string{"KUBERNETES_SERVER_TOKEN"}, - Required: true, - Usage: "A Kubernetes server token which can view buckets.cloudscale.crossplane.io resources", - Destination: &kubernetesServerToken, - }, - &cli.StringFlag{ - Name: "kubeconfig", - EnvVars: []string{"KUBECONFIG"}, - Usage: "Path to a kubeconfig file which will be used instead of url/token flags if set", - Destination: &kubeconfig, - }, - &cli.IntFlag{ - Name: "days", - EnvVars: []string{"DAYS"}, - Value: 1, - Usage: "Days of metrics to fetch since today", - Destination: 
&days, - }, + &cli.StringFlag{Name: "cloudscale-api-token", Usage: "API token for cloudscale", + EnvVars: []string{"CLOUDSCALE_API_TOKEN"}, Destination: &apiToken, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "kubeconfig", Usage: "Path to a kubeconfig file which will be used instead of url/token flags if set", + EnvVars: []string{"KUBECONFIG"}, Destination: &kubeconfig, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "control-api-url", Usage: "URL of the APPUiO Cloud Control API", + EnvVars: []string{"CONTROL_API_URL"}, Destination: &controlApiUrl, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "control-api-token", Usage: "Token of the APPUiO Cloud Control API", + EnvVars: []string{"CONTROL_API_TOKEN"}, Destination: &controlApiToken, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.IntFlag{Name: "days", Usage: "Days of metrics to fetch since today, set to 0 to get current metrics", + EnvVars: []string{"DAYS"}, Destination: &days, Value: 1, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "odoo-url", Usage: "URL of the Odoo Metered Billing API", + EnvVars: []string{"ODOO_URL"}, Destination: &odooURL, Value: "http://localhost:8080"}, + &cli.StringFlag{Name: "odoo-oauth-token-url", Usage: "Oauth Token URL to authenticate with Odoo metered billing API", + EnvVars: []string{"ODOO_OAUTH_TOKEN_URL"}, Destination: &odooOauthTokenURL, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "odoo-oauth-client-id", Usage: "Client ID of the oauth client to interact with Odoo metered billing API", + EnvVars: []string{"ODOO_OAUTH_CLIENT_ID"}, Destination: &odooClientId, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "odoo-oauth-client-secret", Usage: "Client secret of the oauth client to interact with Odoo metered billing API", + EnvVars: 
[]string{"ODOO_OAUTH_CLIENT_SECRET"}, Destination: &odooClientSecret, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "appuio-managed-sales-order", Usage: "Sales order id to save in the billing record for APPUiO Managed only", + EnvVars: []string{"APPUIO_MANAGED_SALES_ORDER"}, Destination: &salesOrder, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "cluster-id", Usage: "The cluster id to save in the billing record", + EnvVars: []string{"CLUSTER_ID"}, Destination: &clusterId, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "uom", Usage: "Unit of measure mapping between cloud services and Odoo16 in json format", + EnvVars: []string{"UOM"}, Destination: &uom, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.IntFlag{Name: "collect-interval", Usage: "How often to collect the metrics from the Cloud Service in hours - 1-23", + EnvVars: []string{"COLLECT_INTERVAL"}, Destination: &collectInterval, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.IntFlag{Name: "billing-hour", Usage: "At what time to start collect the metrics (ex 6 would start running from 6)", + EnvVars: []string{"BILLING_HOUR"}, Destination: &billingHour, Required: true, DefaultText: defaultTextForRequiredFlags}, }, Before: addCommandName, - Subcommands: []*cli.Command{ - { - Name: "objectstorage", - Usage: "Get metrics from object storage service", - Before: addCommandName, - Action: func(c *cli.Context) error { - logger := log.Logger(c.Context) - ctrl.SetLogger(logger) - - logger.Info("Creating cloudscale client") - cloudscaleClient := cloudscale.NewClient(http.DefaultClient) - cloudscaleClient.AuthToken = apiToken - - logger.Info("Creating k8s client") - k8sClient, err := kubernetes.NewClient(kubeconfig, kubernetesServerURL, kubernetesServerToken) - if err != nil { - return fmt.Errorf("k8s client: %w", err) - } + Action: func(c *cli.Context) error { + logger := 
log.Logger(c.Context) + var wg sync.WaitGroup - location, err := time.LoadLocation("Europe/Zurich") - if err != nil { - return fmt.Errorf("load loaction: %w", err) - } - now := time.Now().In(location) - billingDate := time.Date(now.Year(), now.Month(), now.Day()-days, 0, 0, 0, 0, now.Location()) + logger.Info("Checking UOM mappings") + mapping, err := odoo.LoadUOM(uom) + if err != nil { + return err + } + err = cs.CheckUnitExistence(mapping) + if err != nil { + return err + } + + logger.Info("Creating cloudscale client") + cloudscaleClient := cloudscale.NewClient(http.DefaultClient) + cloudscaleClient.AuthToken = apiToken + + logger.Info("Creating k8s client") + k8sClient, err := kubernetes.NewClient(kubeconfig, "", "") + if err != nil { + return fmt.Errorf("k8s client: %w", err) + } + + k8sControlClient, err := kubernetes.NewClient("", controlApiUrl, controlApiToken) + if err != nil { + return fmt.Errorf("k8s control client: %w", err) + } + + odooClient := odoo.NewOdooAPIClient(c.Context, odooURL, odooOauthTokenURL, odooClientId, odooClientSecret, logger) + + location, err := time.LoadLocation("Europe/Zurich") + if err != nil { + return fmt.Errorf("load loaction: %w", err) + } + + o, err := cs.NewObjectStorage(cloudscaleClient, k8sClient, k8sControlClient, salesOrder, clusterId, mapping) + if err != nil { + return fmt.Errorf("object storage: %w", err) + } + + if collectInterval < 1 || collectInterval > 23 { + // Set to run once a day after billingHour in case the collectInterval is out of boundaries + collectInterval = 23 + } + + wg.Add(1) + go func() { + for { + if time.Now().Hour() >= billingHour { + + billingDate := time.Now().In(location) + if days != 0 { + billingDate = time.Date(billingDate.Year(), billingDate.Month(), billingDate.Day()-days, 0, 0, 0, 0, billingDate.Location()) + } + + logger.V(1).Info("Running cloudscale collector") + metrics, err := o.GetMetrics(c.Context, billingDate) + if err != nil { + logger.Error(err, "could not collect cloudscale 
bucket metrics") + wg.Done() + } + + if len(metrics) == 0 { + logger.Info("No data to export to odoo", "date", billingDate) + time.Sleep(time.Hour) + continue + } - o, err := cs.NewObjectStorage(cloudscaleClient, k8sClient, dbURL, billingDate) - if err != nil { - return fmt.Errorf("object storage: %w", err) + logger.Info("Exporting data to Odoo", "billingHour", billingHour, "date", billingDate) + err = odooClient.SendData(metrics) + if err != nil { + logger.Error(err, "could not export cloudscale bucket metrics") + } + time.Sleep(time.Hour * time.Duration(collectInterval)) } - return o.Execute(c.Context) - }, - }, + time.Sleep(time.Hour) + } + }() + wg.Wait() + os.Exit(1) + return nil }, } } diff --git a/pkg/cmd/exoscale.go b/pkg/cmd/exoscale.go index 2a2ce5a..2ac7eab 100644 --- a/pkg/cmd/exoscale.go +++ b/pkg/cmd/exoscale.go @@ -2,18 +2,18 @@ package cmd import ( "fmt" + "os" + "sync" "time" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" + "github.com/urfave/cli/v2" "github.com/vshn/billing-collector-cloudservices/pkg/exoscale" "github.com/vshn/billing-collector-cloudservices/pkg/kubernetes" "github.com/vshn/billing-collector-cloudservices/pkg/log" - ctrl "sigs.k8s.io/controller-runtime" ) -// billingHour represents the hour when metrics are collected -const billingHour = 6 - func addCommandName(c *cli.Context) error { c.Context = log.NewLoggingContext(c.Context, log.Logger(c.Context).WithName(c.Command.Name)) return nil @@ -21,60 +21,56 @@ func addCommandName(c *cli.Context) error { func ExoscaleCmds() *cli.Command { var ( - secret string - accessKey string - dbURL string - kubernetesServerToken string - kubernetesServerURL string - kubeconfig string + secret string + accessKey string + kubeconfig string + controlApiUrl string + controlApiToken string + odooURL string + odooOauthTokenURL string + odooClientId string + odooClientSecret string + salesOrder string + clusterId string + uom string + // For dbaas in minutes + // For objectstorage in hours 
+ // TODO: Fix this mess + collectInterval int + billingHour int ) return &cli.Command{ Name: "exoscale", Usage: "Collect metrics from exoscale", Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "exoscale-secret", - Aliases: []string{"s"}, - EnvVars: []string{"EXOSCALE_API_SECRET"}, - Required: true, - Usage: "The secret which has unrestricted SOS service access in an Exoscale organization", - Destination: &secret, - }, - &cli.StringFlag{ - Name: "exoscale-access-key", - Aliases: []string{"k"}, - EnvVars: []string{"EXOSCALE_API_KEY"}, - Required: true, - Usage: "A key which has unrestricted SOS service access in an Exoscale organization", - Destination: &accessKey, - }, - &cli.StringFlag{ - Name: "database-url", - EnvVars: []string{"ACR_DB_URL"}, - Required: true, - Usage: "A PostgreSQL database URL where to save relevant metrics", - Destination: &dbURL, - }, - &cli.StringFlag{ - Name: "kubernetes-server-url", - EnvVars: []string{"KUBERNETES_SERVER_URL"}, - Required: true, - Usage: "A Kubernetes server URL from where to get the data from", - Destination: &kubernetesServerURL, - }, - &cli.StringFlag{ - Name: "kubernetes-server-token", - EnvVars: []string{"KUBERNETES_SERVER_TOKEN"}, - Required: true, - Usage: "A Kubernetes server token which can view buckets.cloudscale.crossplane.io resources", - Destination: &kubernetesServerToken, - }, - &cli.StringFlag{ - Name: "kubeconfig", - EnvVars: []string{"KUBECONFIG"}, - Usage: "Path to a kubeconfig file which will be used instead of url/token flags if set", - Destination: &kubeconfig, - }, + &cli.StringFlag{Name: "exoscale-secret", Aliases: []string{"s"}, Usage: "The secret which has unrestricted SOS service access in an Exoscale organization", + EnvVars: []string{"EXOSCALE_API_SECRET"}, Destination: &secret, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "exoscale-access-key", Aliases: []string{"k"}, Usage: "A key which has unrestricted SOS service access in an Exoscale organization", + 
EnvVars: []string{"EXOSCALE_API_KEY"}, Destination: &accessKey, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "kubeconfig", Usage: "Path to a kubeconfig file which will be used instead of url/token flags if set", + EnvVars: []string{"KUBECONFIG"}, Destination: &kubeconfig, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "control-api-url", Usage: "URL of the APPUiO Cloud Control API", + EnvVars: []string{"CONTROL_API_URL"}, Destination: &controlApiUrl, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "control-api-token", Usage: "Token of the APPUiO Cloud Control API", + EnvVars: []string{"CONTROL_API_TOKEN"}, Destination: &controlApiToken, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "odoo-url", Usage: "URL of the Odoo Metered Billing API", + EnvVars: []string{"ODOO_URL"}, Destination: &odooURL, Value: "http://localhost:8080"}, + &cli.StringFlag{Name: "odoo-oauth-token-url", Usage: "Oauth Token URL to authenticate with Odoo metered billing API", + EnvVars: []string{"ODOO_OAUTH_TOKEN_URL"}, Destination: &odooOauthTokenURL, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "odoo-oauth-client-id", Usage: "Client ID of the oauth client to interact with Odoo metered billing API", + EnvVars: []string{"ODOO_OAUTH_CLIENT_ID"}, Destination: &odooClientId, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "odoo-oauth-client-secret", Usage: "Client secret of the oauth client to interact with Odoo metered billing API", + EnvVars: []string{"ODOO_OAUTH_CLIENT_SECRET"}, Destination: &odooClientSecret, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "appuio-managed-sales-order", Usage: "Sales order for APPUiO Managed clusters", + EnvVars: []string{"APPUIO_MANAGED_SALES_ORDER"}, Destination: &salesOrder, Required: false, DefaultText: 
defaultTextForOptionalFlags}, + &cli.IntFlag{Name: "collect-interval", Usage: "How often to collect the metrics from the Cloud Service in hours - 1-23", + EnvVars: []string{"COLLECT_INTERVAL"}, Destination: &collectInterval, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.IntFlag{Name: "billing-hour", Usage: "At what time to start collect the metrics (ex 6 would start running from 6)", + EnvVars: []string{"BILLING_HOUR"}, Destination: &billingHour, Required: false, DefaultText: defaultTextForOptionalFlags}, + &cli.StringFlag{Name: "cluster-id", Usage: "The cluster id to save in the billing record", + EnvVars: []string{"CLUSTER_ID"}, Destination: &clusterId, Required: true, DefaultText: defaultTextForRequiredFlags}, + &cli.StringFlag{Name: "uom", Usage: "Unit of measure mapping between cloud services and Odoo16 in json format", + EnvVars: []string{"UOM"}, Destination: &uom, Required: true, DefaultText: defaultTextForRequiredFlags}, }, Before: addCommandName, Subcommands: []*cli.Command{ @@ -84,29 +80,78 @@ func ExoscaleCmds() *cli.Command { Before: addCommandName, Action: func(c *cli.Context) error { logger := log.Logger(c.Context) - ctrl.SetLogger(logger) + var wg sync.WaitGroup logger.Info("Creating Exoscale client") exoscaleClient, err := exoscale.NewClient(accessKey, secret) if err != nil { return fmt.Errorf("exoscale client: %w", err) } + logger.Info("Checking UOM mappings") + mapping, err := odoo.LoadUOM(uom) + if err != nil { + return err + } + err = exoscale.CheckObjectStorageUOMExistence(mapping) + if err != nil { + return err + } + logger.Info("Creating k8s client") - k8sClient, err := kubernetes.NewClient(kubeconfig, kubernetesServerURL, kubernetesServerToken) + k8sClient, err := kubernetes.NewClient(kubeconfig, "", "") if err != nil { return fmt.Errorf("k8s client: %w", err) } - now := time.Now().In(time.UTC) - previousDay := now.Day() - 1 - billingDate := time.Date(now.Year(), now.Month(), previousDay, billingHour, 0, 0, 0, 
now.Location()) + k8sControlClient, err := kubernetes.NewClient("", controlApiUrl, controlApiToken) + if err != nil { + return fmt.Errorf("k8s control client: %w", err) + } + + odooClient := odoo.NewOdooAPIClient(c.Context, odooURL, odooOauthTokenURL, odooClientId, odooClientSecret, logger) + + if collectInterval < 1 || collectInterval > 23 { + // Set to run once a day after billingHour in case the collectInterval is out of boundaries + collectInterval = 23 + } - o, err := exoscale.NewObjectStorage(exoscaleClient, k8sClient, dbURL, billingDate) + o, err := exoscale.NewObjectStorage(exoscaleClient, k8sClient, k8sControlClient, salesOrder, clusterId, mapping) if err != nil { - return fmt.Errorf("object storage: %w", err) + return fmt.Errorf("objectbucket service: %w", err) } - return o.Execute(c.Context) + + wg.Add(1) + go func() { + for { + if time.Now().Hour() >= billingHour { + + logger.Info("Collecting ObjectStorage metrics after", "hour", billingHour) + + metrics, err := o.GetMetrics(c.Context) + if err != nil { + logger.Error(err, "cannot execute objectstorage collector") + wg.Done() + } + if len(metrics) == 0 { + logger.Info("No data to export to odoo") + time.Sleep(time.Hour) + continue + } + logger.Info("Exporting data to Odoo", "time", time.Now()) + err = odooClient.SendData(metrics) + if err != nil { + logger.Error(err, "cannot export metrics") + } + time.Sleep(time.Hour * time.Duration(collectInterval)) + } + time.Sleep(time.Hour) + } + }() + + wg.Wait() + os.Exit(1) + return nil }, }, { @@ -115,27 +160,75 @@ func ExoscaleCmds() *cli.Command { Before: addCommandName, Action: func(c *cli.Context) error { logger := log.Logger(c.Context) - ctrl.SetLogger(logger) + var wg sync.WaitGroup logger.Info("Creating Exoscale client") exoscaleClient, err := exoscale.NewClient(accessKey, secret) if err != nil { return fmt.Errorf("exoscale client: %w", err) } + logger.Info("Checking UOM mappings") + mapping, err := odoo.LoadUOM(uom) + if err != nil { + return err + } + 
err = exoscale.CheckDBaaSUOMExistence(mapping) + if err != nil { + return err + } + logger.Info("Creating k8s client") - k8sClient, err := kubernetes.NewClient(kubeconfig, kubernetesServerURL, kubernetesServerToken) + k8sClient, err := kubernetes.NewClient(kubeconfig, "", "") if err != nil { return fmt.Errorf("k8s client: %w", err) } - billingDate := time.Now().In(time.UTC).Truncate(time.Hour) + k8sControlClient, err := kubernetes.NewClient("", controlApiUrl, controlApiToken) + if err != nil { + return fmt.Errorf("k8s control client: %w", err) + } + + odooClient := odoo.NewOdooAPIClient(c.Context, odooURL, odooOauthTokenURL, odooClientId, odooClientSecret, logger) + + if collectInterval < 1 || collectInterval > 24 { + // Set to run once a day after billingHour in case the collectInterval is out of boundaries + collectInterval = 1 + } - o, err := exoscale.NewDBaaS(exoscaleClient, k8sClient, dbURL, billingDate) + d, err := exoscale.NewDBaaS(exoscaleClient, k8sClient, k8sControlClient, collectInterval, salesOrder, clusterId, mapping) if err != nil { return fmt.Errorf("dbaas service: %w", err) } - return o.Execute(c.Context) + + wg.Add(1) + go func() { + for { + logger.Info("Collecting DBaaS metrics") + metrics, err := d.GetMetrics(c.Context) + if err != nil { + logger.Error(err, "cannot execute dbaas collector") + wg.Done() + } + + if len(metrics) == 0 { + logger.Info("No data to export to odoo", "time", time.Now()) + time.Sleep(time.Minute * time.Duration(collectInterval)) + continue + } + + logger.Info("Exporting data to Odoo", "time", time.Now()) + err = odooClient.SendData(metrics) + if err != nil { + logger.Error(err, "cannot export metrics") + } + time.Sleep(time.Minute * time.Duration(collectInterval)) + } + }() + + wg.Wait() + os.Exit(1) + return nil }, }, }, diff --git a/pkg/controlAPI/controlapi.go b/pkg/controlAPI/controlapi.go new file mode 100644 index 0000000..42cafed --- /dev/null +++ b/pkg/controlAPI/controlapi.go @@ -0,0 +1,22 @@ +package controlAPI + 
+import ( + "context" + "fmt" + + orgv1 "github.com/appuio/control-api/apis/organization/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func GetSalesOrder(ctx context.Context, k8sClient client.Client, orgId string) (string, error) { + + org := &orgv1.Organization{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: orgId}, org) + if err != nil { + return "", fmt.Errorf("cannot get Organization object '%s', err: %v", orgId, err) + } + if org.Status.SalesOrderName == "" { + return "", fmt.Errorf("sales order name is empty on organization object '%s'", orgId) + } + return org.Status.SalesOrderName, nil +} diff --git a/pkg/exofixtures/kafka.go b/pkg/exofixtures/kafka.go index f9be752..c8f9332 100644 --- a/pkg/exofixtures/kafka.go +++ b/pkg/exofixtures/kafka.go @@ -7,7 +7,7 @@ import ( ) // KafkaDBaaSType represents kafka DBaaS type -const KafkaDBaaSType ObjectType = "appcat_kafka" +const KafkaDBaaSType ObjectType = "kafka" // Available plans for Kafka var kafkaProductDBaaS = []productDBaaS{ diff --git a/pkg/exofixtures/mysql.go b/pkg/exofixtures/mysql.go index 3cbbae2..2bcc9e3 100644 --- a/pkg/exofixtures/mysql.go +++ b/pkg/exofixtures/mysql.go @@ -7,7 +7,7 @@ import ( ) // MysqlDBaaSType represents mysql DBaaS type -const MysqlDBaaSType ObjectType = "appcat_mysql" +const MysqlDBaaSType ObjectType = "mysql" // Available plans for MySQL var mysqlProductDBaaS = []productDBaaS{ diff --git a/pkg/exofixtures/opensearch.go b/pkg/exofixtures/opensearch.go index 28f07d6..03b7cb2 100644 --- a/pkg/exofixtures/opensearch.go +++ b/pkg/exofixtures/opensearch.go @@ -7,7 +7,7 @@ import ( ) // OpensearchDBaaSType represents opensearch DBaaS type -const OpensearchDBaaSType ObjectType = "appcat_opensearch" +const OpensearchDBaaSType ObjectType = "opensearch" // Available plans for OpenSearch var opensearchProductDBaaS = []productDBaaS{ diff --git a/pkg/exofixtures/postgres.go b/pkg/exofixtures/postgres.go index 253c730..4cb20b6 100644 --- 
a/pkg/exofixtures/postgres.go +++ b/pkg/exofixtures/postgres.go @@ -7,7 +7,7 @@ import ( ) // PostgresDBaaSType represents postgres DBaaS type -const PostgresDBaaSType ObjectType = "appcat_postgres" +const PostgresDBaaSType ObjectType = "pg" // Available plans for PostgreSQL var postgresProductDBaaS = []productDBaaS{ diff --git a/pkg/exofixtures/redis.go b/pkg/exofixtures/redis.go index b03e2ca..79980fa 100644 --- a/pkg/exofixtures/redis.go +++ b/pkg/exofixtures/redis.go @@ -7,7 +7,7 @@ import ( ) // RedisDBaaSType represents redis DBaaS type -const RedisDBaaSType ObjectType = "appcat_redis" +const RedisDBaaSType ObjectType = "redis" // Available plans for Redis var redisProductDBaaS = []productDBaaS{ diff --git a/pkg/exofixtures/types.go b/pkg/exofixtures/types.go index 561fa58..5854e38 100644 --- a/pkg/exofixtures/types.go +++ b/pkg/exofixtures/types.go @@ -11,9 +11,8 @@ const ( Provider = "exoscale" // SosType represents object storage storage type - SosType ObjectType = "appcat_object-storage-storage" - QuerySos = string(SosType) + ":" + Provider - DefaultUnitSos = "GBDay" + SosType ObjectType = "appcat_object-storage-storage" + QuerySos = string(SosType) + ":" + Provider ) var ObjectStorage = InitConfig{ @@ -49,7 +48,7 @@ const ( defaultUnitDBaaS = "Instances" ) -// BillingTypes contains exoscale service types to Query billing Database types +// BillingTypes contains exoscale service types to ProductId billing Odoo types var BillingTypes = map[string]string{ "pg": queryDBaaSPostgres, "mysql": queryDBaaSMysql, @@ -175,3 +174,20 @@ func (ss DBaaSSourceString) GetQuery() string { func (ss DBaaSSourceString) GetSourceString() string { return strings.Join([]string{ss.Query, ss.Organization, ss.Namespace, ss.Plan}, ":") } + +func (ss DBaaSSourceString) GetCategoryString() string { + return Provider + ":" + ss.Namespace +} + +type SOSSourceString struct { + Namespace string + Organization string +} + +func (ss SOSSourceString) GetSourceString() string { + return 
string(SosType) + ":" + Provider + ":" + ss.Organization + ":" + ss.Namespace +} + +func (ss SOSSourceString) GetCategoryString() string { + return Provider + ":" + ss.Namespace +} diff --git a/pkg/exoscale/common.go b/pkg/exoscale/common.go index 2e7ee24..7a6fb6b 100644 --- a/pkg/exoscale/common.go +++ b/pkg/exoscale/common.go @@ -1,28 +1,25 @@ package exoscale import ( - "context" "encoding/base64" "fmt" "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - ctrl "sigs.k8s.io/controller-runtime" - k8s "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - // organizationLabel represents the label used for organization when fetching the metrics - organizationLabel = "appuio.io/organization" // namespaceLabel represents the label used for namespace when fetching the metrics namespaceLabel = "crossplane.io/claim-namespace" ) +type Sourcer interface { + GetSourceString() string + GetCategoryString() string +} + // Aggregated contains information needed to save the metrics of the different resource types in the database type Aggregated struct { Key - Organization string + Source Sourcer // Value represents the aggregate amount by Key of used service Value float64 } @@ -59,31 +56,3 @@ func (k *Key) DecodeKey() (tokens []string, err error) { s := strings.Split(string(decodedKey), ";") return s, nil } - -func fetchNamespaceWithOrganizationMap(ctx context.Context, k8sClient k8s.Client) (map[string]string, error) { - logger := ctrl.LoggerFrom(ctx) - - gvk := schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "NamespaceList", - } - list := &metav1.PartialObjectMetadataList{} - list.SetGroupVersionKind(gvk) - - err := k8sClient.List(ctx, list) - if err != nil { - return nil, fmt.Errorf("cannot get namespace list: %w", err) - } - - namespaces := map[string]string{} - for _, ns := range list.Items { - org, ok := ns.GetLabels()[organizationLabel] - if !ok { - logger.Info("Organization label not found in namespace", 
"namespace", ns.GetName()) - continue - } - namespaces[ns.GetName()] = org - } - return namespaces, nil -} diff --git a/pkg/exoscale/dbaas.go b/pkg/exoscale/dbaas.go index 280edb7..b5288be 100644 --- a/pkg/exoscale/dbaas.go +++ b/pkg/exoscale/dbaas.go @@ -5,17 +5,19 @@ import ( "fmt" "time" - "github.com/appuio/appuio-cloud-reporting/pkg/db" egoscale "github.com/exoscale/egoscale/v2" - "github.com/vshn/billing-collector-cloudservices/pkg/exofixtures" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" + "github.com/vshn/billing-collector-cloudservices/pkg/controlAPI" + "github.com/vshn/billing-collector-cloudservices/pkg/kubernetes" + "github.com/vshn/billing-collector-cloudservices/pkg/log" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - ctrl "sigs.k8s.io/controller-runtime" k8s "sigs.k8s.io/controller-runtime/pkg/client" ) +const productIdPrefix = "appcat-exoscale" + var ( groupVersionKinds = map[string]schema.GroupVersionKind{ "pg": { @@ -44,6 +46,14 @@ var ( Kind: "KafkaList", }, } + + dbaasTypes = map[string]string{ + "pg": "PostgreSQL", + "mysql": "MySQL", + "opensearch": "OpenSearch", + "redis": "Redis", + "kafka": "Kafka", + } ) // Detail a helper structure for intermediate operations @@ -51,61 +61,31 @@ type Detail struct { Organization, DBName, Namespace, Plan, Zone, Kind string } -// DBaaS provides DBaaS Database info and required clients +// DBaaS provides DBaaS Odoo info and required clients type DBaaS struct { - exoscaleClient *egoscale.Client - k8sClient k8s.Client - databaseURL string - billingDate time.Time + exoscaleClient *egoscale.Client + k8sClient k8s.Client + controlApiClient k8s.Client + salesOrder string + clusterId string + collectInterval int + uomMapping map[string]string } // NewDBaaS creates a Service with the initial setup -func NewDBaaS(exoscaleClient *egoscale.Client, k8sClient 
k8s.Client, databaseURL string, billingDate time.Time) (*DBaaS, error) { +func NewDBaaS(exoscaleClient *egoscale.Client, k8sClient k8s.Client, controlApiClient k8s.Client, collectInterval int, salesOrder, clusterId string, uomMapping map[string]string) (*DBaaS, error) { return &DBaaS{ - exoscaleClient: exoscaleClient, - k8sClient: k8sClient, - databaseURL: databaseURL, - billingDate: billingDate, + exoscaleClient: exoscaleClient, + k8sClient: k8sClient, + controlApiClient: controlApiClient, + salesOrder: salesOrder, + clusterId: clusterId, + collectInterval: collectInterval, + uomMapping: uomMapping, }, nil } -// Execute executes the main business logic for this application by gathering, matching and saving data to the database -func (ds *DBaaS) Execute(ctx context.Context) error { - logger := ctrl.LoggerFrom(ctx) - - s, err := reporting.NewStore(ds.databaseURL, logger.WithName("reporting-store")) - if err != nil { - return fmt.Errorf("reporting.NewStore: %w", err) - } - defer func() { - if err := s.Close(); err != nil { - logger.Error(err, "unable to close") - } - }() - - if err := ds.initialize(ctx, s); err != nil { - return err - } - accumulated, err := ds.accumulate(ctx) - if err != nil { - return err - } - return ds.save(ctx, s, accumulated) -} - -func (ds *DBaaS) initialize(ctx context.Context, s *reporting.Store) error { - logger := ctrl.LoggerFrom(ctx) - - for t, fixtures := range exofixtures.DBaaS { - if err := s.Initialize(ctx, fixtures.Products, []*db.Discount{&fixtures.Discount}, []*db.Query{&fixtures.Query}); err != nil { - return fmt.Errorf("initialize(%q): %w", t, err) - } - } - logger.Info("initialized reporting db") - return nil -} - -func (ds *DBaaS) accumulate(ctx context.Context) (map[Key]Aggregated, error) { +func (ds *DBaaS) GetMetrics(ctx context.Context) ([]odoo.OdooMeteredBillingRecord, error) { detail, err := ds.fetchManagedDBaaSAndNamespaces(ctx) if err != nil { return nil, fmt.Errorf("fetchManagedDBaaSAndNamespaces: %w", err) @@ -116,31 
+96,15 @@ func (ds *DBaaS) accumulate(ctx context.Context) (map[Key]Aggregated, error) { return nil, fmt.Errorf("fetchDBaaSUsage: %w", err) } - return aggregateDBaaS(ctx, usage, detail), nil -} - -func (ds *DBaaS) save(ctx context.Context, s *reporting.Store, aggregatedObjects map[Key]Aggregated) error { - logger := ctrl.LoggerFrom(ctx) - if len(aggregatedObjects) == 0 { - logger.Info("there are no DBaaS instances to be saved in the database") - return nil - } - for _, aggregated := range aggregatedObjects { - err := ds.ensureAggregatedServiceUsage(ctx, aggregated, s) - if err != nil { - logger.Error(err, "Cannot save aggregated DBaaS service record to billing database") - continue - } - } - return nil + return ds.AggregateDBaaS(ctx, usage, detail) } // fetchManagedDBaaSAndNamespaces fetches instances and namespaces from kubernetes cluster func (ds *DBaaS) fetchManagedDBaaSAndNamespaces(ctx context.Context) ([]Detail, error) { - logger := ctrl.LoggerFrom(ctx) + logger := log.Logger(ctx) logger.V(1).Info("Listing namespaces from cluster") - namespaces, err := fetchNamespaceWithOrganizationMap(ctx, ds.k8sClient) + namespaces, err := kubernetes.FetchNamespaceWithOrganizationMap(ctx, ds.k8sClient) if err != nil { return nil, fmt.Errorf("cannot list namespaces: %w", err) } @@ -170,7 +134,7 @@ func (ds *DBaaS) fetchManagedDBaaSAndNamespaces(ctx context.Context) ([]Detail, } func findDBaaSDetailInNamespacesMap(ctx context.Context, resource metav1.PartialObjectMetadata, gvk schema.GroupVersionKind, namespaces map[string]string) *Detail { - logger := ctrl.LoggerFrom(ctx).WithValues("dbaas", resource.GetName()) + logger := log.Logger(ctx).WithValues("dbaas", resource.GetName()) namespace, exist := resource.GetLabels()[namespaceLabel] if !exist { @@ -191,6 +155,7 @@ func findDBaaSDetailInNamespacesMap(ctx context.Context, resource metav1.Partial Kind: gvk.Kind, Namespace: namespace, Organization: organization, + Zone: resource.GetAnnotations()["appcat.vshn.io/cloudzone"], } 
logger.V(1).Info("Added namespace and organization to DBaaS", "namespace", dbaasDetail.Namespace, "organization", dbaasDetail.Organization) @@ -199,7 +164,7 @@ func findDBaaSDetailInNamespacesMap(ctx context.Context, resource metav1.Partial // fetchDBaaSUsage gets DBaaS service usage from Exoscale func (ds *DBaaS) fetchDBaaSUsage(ctx context.Context) ([]*egoscale.DatabaseService, error) { - logger := ctrl.LoggerFrom(ctx) + logger := log.Logger(ctx) logger.Info("Fetching DBaaS usage from Exoscale") var databaseServices []*egoscale.DatabaseService @@ -214,9 +179,9 @@ func (ds *DBaaS) fetchDBaaSUsage(ctx context.Context) ([]*egoscale.DatabaseServi return databaseServices, nil } -// aggregateDBaaS aggregates DBaaS services by namespaces and plan -func aggregateDBaaS(ctx context.Context, exoscaleDBaaS []*egoscale.DatabaseService, dbaasDetails []Detail) map[Key]Aggregated { - logger := ctrl.LoggerFrom(ctx) +// AggregateDBaaS aggregates DBaaS services by namespaces and plan +func (ds *DBaaS) AggregateDBaaS(ctx context.Context, exoscaleDBaaS []*egoscale.DatabaseService, dbaasDetails []Detail) ([]odoo.OdooMeteredBillingRecord, error) { + logger := log.Logger(ctx) logger.Info("Aggregating DBaaS instances by namespace and plan") // The DBaaS names are unique across DB types in an Exoscale organization. 
@@ -225,60 +190,61 @@ func aggregateDBaaS(ctx context.Context, exoscaleDBaaS []*egoscale.DatabaseServi dbaasServiceUsageMap[*usage.Name] = *usage } - aggregatedDBaasS := make(map[Key]Aggregated) + location, err := time.LoadLocation("Europe/Zurich") + if err != nil { + return nil, fmt.Errorf("load location: %w", err) + } + + now := time.Now().In(location) + billingDateStart := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()).In(time.UTC) + billingDateEnd := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()+1, 0, 0, 0, now.Location()).In(time.UTC) + + records := make([]odoo.OdooMeteredBillingRecord, 0) for _, dbaasDetail := range dbaasDetails { logger.V(1).Info("Checking DBaaS", "instance", dbaasDetail.DBName) dbaasUsage, exists := dbaasServiceUsageMap[dbaasDetail.DBName] if exists && dbaasDetail.Kind == groupVersionKinds[*dbaasUsage.Type].Kind { logger.V(1).Info("Found exoscale dbaas usage", "instance", dbaasUsage.Name, "instance created", dbaasUsage.CreatedAt) - key := NewKey(dbaasDetail.Namespace, *dbaasUsage.Plan, *dbaasUsage.Type) - aggregated := aggregatedDBaasS[key] - aggregated.Key = key - aggregated.Organization = dbaasDetail.Organization - aggregated.Value++ - aggregatedDBaasS[key] = aggregated + + itemGroup := fmt.Sprintf("APPUiO Managed - Zone: %s / Namespace: %s", ds.clusterId, dbaasDetail.Namespace) + instanceId := fmt.Sprintf("%s/%s", dbaasDetail.Zone, dbaasDetail.DBName) + if ds.salesOrder == "" { + itemGroup = fmt.Sprintf("APPUiO Cloud - Zone: %s / Namespace: %s", ds.clusterId, dbaasDetail.Namespace) + ds.salesOrder, err = controlAPI.GetSalesOrder(ctx, ds.controlApiClient, dbaasDetail.Organization) + if err != nil { + logger.Error(err, "Unable to sync DBaaS, cannot get salesOrder", "namespace", dbaasDetail.Namespace) + continue + } + } + + o := odoo.OdooMeteredBillingRecord{ + ProductID: productIdPrefix + fmt.Sprintf("-%s-%s", *dbaasUsage.Type, *dbaasUsage.Plan), + InstanceID: instanceId, + ItemDescription: 
"Exoscale DBaaS " + dbaasTypes[*dbaasUsage.Type], + ItemGroupDescription: itemGroup, + SalesOrder: ds.salesOrder, + UnitID: ds.uomMapping[odoo.InstanceHour], + ConsumedUnits: 1, + TimeRange: odoo.TimeRange{ + From: billingDateStart, + To: billingDateEnd, + }, + } + + records = append(records, o) + } else { logger.Info("Could not find any DBaaS on exoscale", "instance", dbaasDetail.DBName) } } - return aggregatedDBaasS + return records, nil } -// ensureAggregatedServiceUsage saves the aggregated database service usage by namespace and plan to the billing database -// To save the correct data to the database the function also matches a relevant product, Discount (if any) and Query. -func (ds *DBaaS) ensureAggregatedServiceUsage(ctx context.Context, aggregatedDatabaseService Aggregated, s *reporting.Store) error { - logger := ctrl.LoggerFrom(ctx) - - tokens, err := aggregatedDatabaseService.DecodeKey() - if err != nil { - return fmt.Errorf("cannot decode key namespace-plan-dbtype - %s, organization %s, number of instances %f: %w", - aggregatedDatabaseService.Key, - aggregatedDatabaseService.Organization, - aggregatedDatabaseService.Value, - err) +func CheckDBaaSUOMExistence(mapping map[string]string) error { + if mapping[odoo.InstanceHour] == "" { + return fmt.Errorf("missing UOM mapping %s", odoo.InstanceHour) } - namespace := tokens[0] - plan := tokens[1] - dbType := tokens[2] - - logger.Info("Saving DBaaS usage", "namespace", namespace, "plan", plan, "type", dbType, "quantity", aggregatedDatabaseService.Value) - - sourceString := exofixtures.DBaaSSourceString{ - Query: exofixtures.BillingTypes[dbType], - Organization: aggregatedDatabaseService.Organization, - Namespace: namespace, - Plan: plan, - } - - return s.WriteRecord(ctx, reporting.Record{ - TenantSource: aggregatedDatabaseService.Organization, - CategorySource: exofixtures.Provider + ":" + namespace, - BillingDate: ds.billingDate, - ProductSource: sourceString.GetSourceString(), - DiscountSource: 
sourceString.GetSourceString(), - QueryName: sourceString.GetQuery(), - Value: aggregatedDatabaseService.Value, - }) + return nil } diff --git a/pkg/exoscale/dbaas_integration_test.go b/pkg/exoscale/dbaas_integration_test.go deleted file mode 100644 index c4646fc..0000000 --- a/pkg/exoscale/dbaas_integration_test.go +++ /dev/null @@ -1,218 +0,0 @@ -//go:build integration - -package exoscale - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/exoscale/egoscale/v2/oapi" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/suite" - "github.com/vshn/billing-collector-cloudservices/pkg/exofixtures" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" - "github.com/vshn/billing-collector-cloudservices/pkg/test" - exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type DBaaSTestSuite struct { - test.Suite - billingDate time.Time -} - -func (ts *DBaaSTestSuite) SetupSuite() { - exoscaleCRDPaths := os.Getenv("EXOSCALE_CRDS_PATH") - ts.Require().NotZero(exoscaleCRDPaths, "missing env variable EXOSCALE_CRDS_PATH") - - ts.SetupEnv([]string{exoscaleCRDPaths}) - - ts.RegisterScheme(exoscalev1.SchemeBuilder.AddToScheme) -} - -func (ts *DBaaSTestSuite) TestMetrics() { - assert := ts.Assert() - ctx := ts.Context - - ds, cancel := ts.setupDBaaS() - defer cancel() - - type testcase struct { - gvk schema.GroupVersionKind - ns string - plan string - dbType oapi.DbaasServiceTypeName - } - - nsTenantMap := map[string]string{ - "example-project": "example-company", - "next-big-thing": "big-corporation", - } - for ns, tenant := range nsTenantMap { - ts.EnsureNS(ns, map[string]string{organizationLabel: tenant}) - } - - tests := make(map[string]testcase) - for key, gvk := range 
groupVersionKinds { - plan := "hobbyist-2" - // kafka has no hobbyist plan - if key == "kafka" { - plan = "startup-2" - } - tests[key+"-example-project"] = testcase{ - gvk: gvk, - ns: "example-project", - plan: plan, - dbType: oapi.DbaasServiceTypeName(key), - } - } - - tests["pg-expensive-example-project"] = testcase{ - gvk: groupVersionKinds["pg"], - ns: "example-project", - plan: "premium-225", - dbType: "pg", - } - - tests["pg-next-big-thing"] = testcase{ - gvk: groupVersionKinds["pg"], - ns: "next-big-thing", - plan: "business-225", - dbType: "pg", - } - - type expectation struct { - value float64 - tc testcase - } - expectedQuantities := make(map[Key]expectation, 0) - - for name, tc := range tests { - key := NewKey(tc.ns, tc.plan, string(tc.dbType)) - if _, ok := expectedQuantities[key]; !ok { - expectedQuantities[key] = expectation{ - value: 0, - tc: tc, - } - } - expectedQuantities[key] = expectation{ - value: expectedQuantities[key].value + 1, - tc: tc, - } - - obj := &unstructured.Unstructured{} - obj.SetUnstructuredContent(map[string]interface{}{ - "apiVersion": tc.gvk.GroupVersion().String(), - // a bit ugly, but I wanted to avoid adding more than necessary code - "kind": strings.Replace(tc.gvk.Kind, "List", "", 1), - "metadata": map[string]interface{}{ - "name": name, - "labels": map[string]string{ - namespaceLabel: tc.ns, - }, - }, - "spec": map[string]interface{}{ - "forProvider": map[string]interface{}{ - "zone": "ch-gva-2", - }, - }, - }) - ts.EnsureResources(obj) - } - - assert.NoError(ds.Execute(ctx)) - - store, err := reporting.NewStore(ts.DatabaseURL, ts.Logger) - assert.NoError(err) - defer func() { - assert.NoError(store.Close()) - }() - - // a bit pointless to use a transaction for checking the results but I wanted to avoid exposing something - // which should not be used outside test code. 
- assert.NoError(store.WithTransaction(ctx, func(tx *sqlx.Tx) error { - dt, err := reporting.GetDateTime(ctx, tx, ts.billingDate) - if !assert.NoError(err) || !assert.NotZero(dt) { - return fmt.Errorf("no dateTime found(%q): %w (nil? %v)", ts.billingDate, err, dt) - } - - for _, want := range expectedQuantities { - fact, err := ts.getFact(ctx, tx, ts.billingDate, dt, dbaasSource{ - dbType: string(want.tc.dbType), - tenant: nsTenantMap[want.tc.ns], - namespace: want.tc.ns, - plan: want.tc.plan, - }) - assert.NoError(err, want.tc.ns) - - assert.NotNil(fact, want.tc.ns) - assert.Equal(want.value, fact.Quantity, want.tc.ns) - } - return nil - })) -} - -type dbaasSource struct { - dbType string - tenant string - namespace string - plan string -} - -func (ts *DBaaSTestSuite) getFact(ctx context.Context, tx *sqlx.Tx, date time.Time, dt *db.DateTime, src dbaasSource) (*db.Fact, error) { - sourceString := exofixtures.DBaaSSourceString{ - Query: exofixtures.BillingTypes[src.dbType], - Organization: src.tenant, - Namespace: src.namespace, - Plan: src.plan, - } - record := reporting.Record{ - TenantSource: src.tenant, - CategorySource: exofixtures.Provider + ":" + src.namespace, - BillingDate: date, - ProductSource: sourceString.GetSourceString(), - DiscountSource: sourceString.GetSourceString(), - QueryName: sourceString.GetQuery(), - } - return test.FactByRecord(ctx, tx, dt, record) -} - -func (ts *DBaaSTestSuite) ensureBuckets(nameNsMap map[string]string) { - resources := make([]client.Object, 0) - for name, ns := range nameNsMap { - resources = append(resources, &exoscalev1.Bucket{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{namespaceLabel: ns}, - }, - Spec: exoscalev1.BucketSpec{ - ForProvider: exoscalev1.BucketParameters{BucketName: name}, - }, - }) - } - ts.EnsureResources(resources...) 
-} - -func (ts *DBaaSTestSuite) setupDBaaS() (*DBaaS, func()) { - exoClient, cancel, err := newEgoscaleClient(ts.T()) - ts.Assert().NoError(err) - - ts.billingDate = time.Date(2023, 1, 11, 6, 0, 0, 0, time.UTC) - ds, err := NewDBaaS(exoClient, ts.Client, ts.DatabaseURL, ts.billingDate) - ts.Assert().NoError(err) - return ds, cancel -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestDBaaSTestSuite(t *testing.T) { - suite.Run(t, new(DBaaSTestSuite)) -} diff --git a/pkg/exoscale/dbaas_test.go b/pkg/exoscale/dbaas_test.go index 622c3f6..33066b5 100644 --- a/pkg/exoscale/dbaas_test.go +++ b/pkg/exoscale/dbaas_test.go @@ -2,37 +2,55 @@ package exoscale import ( "context" - "reflect" "testing" + "time" egoscale "github.com/exoscale/egoscale/v2" "github.com/stretchr/testify/assert" "github.com/vshn/billing-collector-cloudservices/pkg/exofixtures" + "github.com/vshn/billing-collector-cloudservices/pkg/log" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" ) func TestDBaaS_aggregatedDBaaS(t *testing.T) { - ctx := context.Background() + ctx := getTestContext(t) - key1 := NewKey("vshn-xyz", "hobbyist-2", string(exofixtures.PostgresDBaaSType)) - key2 := NewKey("vshn-abc", "business-128", string(exofixtures.PostgresDBaaSType)) + location, _ := time.LoadLocation("Europe/Zurich") - expectedAggregatedDBaaS := map[Key]Aggregated{ - key1: { - Key: key1, - Organization: "org1", - Value: 1, + now := time.Now().In(location) + record1 := odoo.OdooMeteredBillingRecord{ + ProductID: "appcat-exoscale-pg-hobbyist-2", + InstanceID: "ch-gva-2/postgres-abc", + ItemDescription: "Exoscale DBaaS PostgreSQL", + ItemGroupDescription: "APPUiO Managed - Zone: c-test1 / Namespace: vshn-xyz", + SalesOrder: "1234", + UnitID: "", + ConsumedUnits: 1, + TimeRange: odoo.TimeRange{ + From: time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()).In(time.UTC), + To: time.Date(now.Year(), 
now.Month(), now.Day(), now.Hour()+1, 0, 0, 0, now.Location()).In(time.UTC), }, - key2: { - Key: key2, - Organization: "org2", - Value: 1, + } + record2 := odoo.OdooMeteredBillingRecord{ + ProductID: "appcat-exoscale-pg-business-128", + InstanceID: "ch-gva-2/postgres-def", + ItemDescription: "Exoscale DBaaS PostgreSQL", + ItemGroupDescription: "APPUiO Managed - Zone: c-test1 / Namespace: vshn-uvw", + SalesOrder: "1234", + UnitID: "", + ConsumedUnits: 1, + TimeRange: odoo.TimeRange{ + From: time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()).In(time.UTC), + To: time.Date(now.Year(), now.Month(), now.Day(), now.Hour()+1, 0, 0, 0, now.Location()).In(time.UTC), }, } + expectedAggregatedOdooRecords := []odoo.OdooMeteredBillingRecord{record1, record2} + tests := map[string]struct { - dbaasDetails []Detail - exoscaleDBaaS []*egoscale.DatabaseService - expectedAggregatedDBaaS map[Key]Aggregated + dbaasDetails []Detail + exoscaleDBaaS []*egoscale.DatabaseService + expectedAggregatedOdooRecords []odoo.OdooMeteredBillingRecord }{ "given DBaaS details and Exoscale DBaasS, we should get the ExpectedAggregatedDBaasS": { dbaasDetails: []Detail{ @@ -41,12 +59,14 @@ func TestDBaaS_aggregatedDBaaS(t *testing.T) { DBName: "postgres-abc", Namespace: "vshn-xyz", Zone: "ch-gva-2", + Kind: "PostgreSQLList", }, { Organization: "org2", DBName: "postgres-def", - Namespace: "vshn-abc", + Namespace: "vshn-uvw", Zone: "ch-gva-2", + Kind: "PostgreSQLList", }, }, exoscaleDBaaS: []*egoscale.DatabaseService{ @@ -61,7 +81,7 @@ func TestDBaaS_aggregatedDBaaS(t *testing.T) { Plan: strToPointer("business-128"), }, }, - expectedAggregatedDBaaS: expectedAggregatedDBaaS, + expectedAggregatedOdooRecords: expectedAggregatedOdooRecords, }, "given DBaaS details and different names in Exoscale DBaasS, we should not get the ExpectedAggregatedDBaasS": { dbaasDetails: []Detail{ @@ -70,12 +90,14 @@ func TestDBaaS_aggregatedDBaaS(t *testing.T) { DBName: "postgres-abc", Namespace: 
"vshn-xyz", Zone: "ch-gva-2", + Kind: "PostgreSQLList", }, { Organization: "org2", DBName: "postgres-def", Namespace: "vshn-abc", Zone: "ch-gva-2", + Kind: "PostgreSQLList", }, }, exoscaleDBaaS: []*egoscale.DatabaseService{ @@ -89,14 +111,16 @@ func TestDBaaS_aggregatedDBaaS(t *testing.T) { }, }, - expectedAggregatedDBaaS: map[Key]Aggregated{}, + expectedAggregatedOdooRecords: []odoo.OdooMeteredBillingRecord{}, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - aggregatedDBaaS := aggregateDBaaS(ctx, tc.exoscaleDBaaS, tc.dbaasDetails) - assert.True(t, reflect.DeepEqual(tc.expectedAggregatedDBaaS, aggregatedDBaaS)) + ds, _ := NewDBaaS(nil, nil, nil, 1, "1234", "c-test1", map[string]string{}) + aggregatedOdooRecords, err := ds.AggregateDBaaS(ctx, tc.exoscaleDBaaS, tc.dbaasDetails) + assert.NoError(t, err) + assert.Equal(t, tc.expectedAggregatedOdooRecords, aggregatedOdooRecords) }) } } @@ -104,3 +128,10 @@ func TestDBaaS_aggregatedDBaaS(t *testing.T) { func strToPointer(s string) *string { return &s } + +func getTestContext(t assert.TestingT) context.Context { + logger, err := log.NewLogger("test", time.Now().String(), 1, "console") + assert.NoError(t, err, "cannot create logger") + ctx := log.NewLoggingContext(context.Background(), logger) + return ctx +} diff --git a/pkg/exoscale/objectstorage.go b/pkg/exoscale/objectstorage.go index a665a21..10a6179 100644 --- a/pkg/exoscale/objectstorage.go +++ b/pkg/exoscale/objectstorage.go @@ -3,127 +3,87 @@ package exoscale import ( "context" "fmt" - "strings" "time" - "github.com/appuio/appuio-cloud-reporting/pkg/db" egoscale "github.com/exoscale/egoscale/v2" "github.com/exoscale/egoscale/v2/oapi" + "github.com/vshn/billing-collector-cloudservices/pkg/controlAPI" "github.com/vshn/billing-collector-cloudservices/pkg/exofixtures" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" + "github.com/vshn/billing-collector-cloudservices/pkg/kubernetes" + 
"github.com/vshn/billing-collector-cloudservices/pkg/log" + "github.com/vshn/billing-collector-cloudservices/pkg/odoo" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - ctrl "sigs.k8s.io/controller-runtime" + k8s "sigs.k8s.io/controller-runtime/pkg/client" ) +const productIdStorage = "appcat-exoscale-object-storage" + // ObjectStorage gathers bucket data from exoscale provider and cluster and saves to the database type ObjectStorage struct { - k8sClient k8s.Client - exoscaleClient *egoscale.Client - databaseURL string - billingDate time.Time + k8sClient k8s.Client + exoscaleClient *egoscale.Client + controlApiClient k8s.Client + salesOrder string + clusterId string + uomMapping map[string]string } // BucketDetail a k8s bucket object with relevant data type BucketDetail struct { - Organization, BucketName, Namespace string + Organization, BucketName, Namespace, Zone string } // NewObjectStorage creates an ObjectStorage with the initial setup -func NewObjectStorage(exoscaleClient *egoscale.Client, k8sClient k8s.Client, databaseURL string, billingDate time.Time) (*ObjectStorage, error) { +func NewObjectStorage(exoscaleClient *egoscale.Client, k8sClient k8s.Client, controlApiClient k8s.Client, salesOrder, clusterId string, uomMapping map[string]string) (*ObjectStorage, error) { return &ObjectStorage{ - exoscaleClient: exoscaleClient, - k8sClient: k8sClient, - databaseURL: databaseURL, - billingDate: billingDate, + k8sClient: k8sClient, + exoscaleClient: exoscaleClient, + controlApiClient: controlApiClient, + salesOrder: salesOrder, + clusterId: clusterId, + uomMapping: uomMapping, }, nil } -// Execute executes the main business logic for this application by gathering, matching and saving data to the database -func (o *ObjectStorage) Execute(ctx context.Context) error { - logger := ctrl.LoggerFrom(ctx) - s, err := reporting.NewStore(o.databaseURL, logger.WithName("reporting-store")) - if err != nil { - return fmt.Errorf("reporting.NewStore: %w", err) - } 
- defer func() { - if err := s.Close(); err != nil { - logger.Error(err, "unable to close") - } - }() - - if err := o.initialize(ctx, s); err != nil { - return err - } - accumulated, err := o.accumulate(ctx) - if err != nil { - return err - } - return o.save(ctx, s, accumulated) -} - -func (o *ObjectStorage) initialize(ctx context.Context, s *reporting.Store) error { - logger := ctrl.LoggerFrom(ctx) - - fixtures := exofixtures.ObjectStorage - if err := s.Initialize(ctx, fixtures.Products, []*db.Discount{&fixtures.Discount}, []*db.Query{&fixtures.Query}); err != nil { - return fmt.Errorf("initialize: %w", err) - } - logger.Info("initialized reporting db") - return nil -} - -func (o *ObjectStorage) accumulate(ctx context.Context) (map[Key]Aggregated, error) { +func (o *ObjectStorage) GetMetrics(ctx context.Context) ([]odoo.OdooMeteredBillingRecord, error) { detail, err := o.fetchManagedBucketsAndNamespaces(ctx) if err != nil { return nil, fmt.Errorf("fetchManagedBucketsAndNamespaces: %w", err) } - aggregated, err := o.getBucketUsage(ctx, detail) + + metrics, err := o.getBucketUsage(ctx, detail) if err != nil { return nil, fmt.Errorf("getBucketUsage: %w", err) } - return aggregated, nil -} - -func (o *ObjectStorage) save(ctx context.Context, s *reporting.Store, aggregatedObjects map[Key]Aggregated) error { - logger := ctrl.LoggerFrom(ctx) - - if len(aggregatedObjects) == 0 { - logger.Info("no buckets to be saved to the database") - return nil - } - - for _, aggregated := range aggregatedObjects { - err := o.ensureBucketUsage(ctx, s, aggregated) - if err != nil { - logger.Error(err, "cannot save aggregated buckets service record to billing database") - continue - } - } - return nil + return metrics, nil } // getBucketUsage gets bucket usage from Exoscale and matches them with the bucket from the cluster // If there are no buckets in Exoscale, the API will return an empty slice -func (o *ObjectStorage) getBucketUsage(ctx context.Context, bucketDetails []BucketDetail) 
(map[Key]Aggregated, error) { - logger := ctrl.LoggerFrom(ctx) +func (o *ObjectStorage) getBucketUsage(ctx context.Context, bucketDetails []BucketDetail) ([]odoo.OdooMeteredBillingRecord, error) { + logger := log.Logger(ctx) logger.Info("Fetching bucket usage from Exoscale") + resp, err := o.exoscaleClient.ListSosBucketsUsageWithResponse(ctx) if err != nil { return nil, err } - aggregatedBuckets := getAggregatedBuckets(ctx, *resp.JSON200.SosBucketsUsage, bucketDetails) - if len(aggregatedBuckets) == 0 { - logger.Info("There are no bucket usage to be saved in the database") + odooMetrics, err := o.getOdooMeteredBillingRecords(ctx, *resp.JSON200.SosBucketsUsage, bucketDetails) + if err != nil { + return nil, err + } + if len(odooMetrics) == 0 { + logger.Info("There are no bucket usage to be exported") return nil, nil } - return aggregatedBuckets, nil + return odooMetrics, nil } -func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketUsage, bucketDetails []BucketDetail) map[Key]Aggregated { - logger := ctrl.LoggerFrom(ctx) +func (o *ObjectStorage) getOdooMeteredBillingRecords(ctx context.Context, sosBucketsUsage []oapi.SosBucketUsage, bucketDetails []BucketDetail) ([]odoo.OdooMeteredBillingRecord, error) { + logger := log.Logger(ctx) logger.Info("Aggregating buckets by namespace") sosBucketsUsageMap := make(map[string]oapi.SosBucketUsage, len(sosBucketsUsage)) @@ -131,27 +91,61 @@ func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketU sosBucketsUsageMap[*usage.Name] = usage } - aggregatedBuckets := make(map[Key]Aggregated) + location, err := time.LoadLocation("Europe/Zurich") + if err != nil { + return nil, fmt.Errorf("load loaction: %w", err) + } + + now := time.Now().In(location) + billingDate := time.Date(now.Year(), now.Month(), now.Day()-1, 0, 0, 0, 0, now.Location()).In(time.UTC) + + aggregatedBuckets := make([]odoo.OdooMeteredBillingRecord, 0) for _, bucketDetail := range bucketDetails { 
logger.V(1).Info("Checking bucket", "bucket", bucketDetail.BucketName) if bucketUsage, exists := sosBucketsUsageMap[bucketDetail.BucketName]; exists { logger.V(1).Info("Found exoscale bucket usage", "bucket", bucketUsage.Name, "bucket size", bucketUsage.Name) - key := NewKey(bucketDetail.Namespace) - aggregatedBucket := aggregatedBuckets[key] - aggregatedBucket.Key = key - aggregatedBucket.Organization = bucketDetail.Organization - aggregatedBucket.Value += float64(*bucketUsage.Size) - aggregatedBuckets[key] = aggregatedBucket + value, err := adjustStorageSizeUnit(float64(*bucketUsage.Size)) + if err != nil { + return nil, err + } + + itemGroup := fmt.Sprintf("APPUiO Managed - Zone: %s / Namespace: %s", o.clusterId, bucketDetail.Namespace) + instanceId := fmt.Sprintf("%s/%s", bucketDetail.Zone, bucketDetail.BucketName) + if o.salesOrder == "" { + itemGroup = fmt.Sprintf("APPUiO Cloud - Zone: %s / Namespace: %s", o.clusterId, bucketDetail.Namespace) + o.salesOrder, err = controlAPI.GetSalesOrder(ctx, o.controlApiClient, bucketDetail.Organization) + if err != nil { + logger.Error(err, "unable to sync bucket", "namespace", bucketDetail.Namespace) + continue + } + } + + o := odoo.OdooMeteredBillingRecord{ + ProductID: productIdStorage, + InstanceID: instanceId, + ItemDescription: "AppCat Exoscale ObjectStorage", + ItemGroupDescription: itemGroup, + SalesOrder: o.salesOrder, + UnitID: o.uomMapping[odoo.GBDay], + ConsumedUnits: value, + TimeRange: odoo.TimeRange{ + From: billingDate, + To: billingDate.AddDate(0, 0, 1), + }, + } + + aggregatedBuckets = append(aggregatedBuckets, o) + } else { logger.Info("Could not find any bucket on exoscale", "bucket", bucketDetail.BucketName) } } - return aggregatedBuckets + return aggregatedBuckets, nil } func (o *ObjectStorage) fetchManagedBucketsAndNamespaces(ctx context.Context) ([]BucketDetail, error) { - logger := ctrl.LoggerFrom(ctx) + logger := log.Logger(ctx) logger.Info("Fetching buckets and namespaces from cluster") buckets 
:= exoscalev1.BucketList{} @@ -162,7 +156,7 @@ func (o *ObjectStorage) fetchManagedBucketsAndNamespaces(ctx context.Context) ([ } logger.V(1).Info("Listing namespaces from cluster") - namespaces, err := fetchNamespaceWithOrganizationMap(ctx, o.k8sClient) + namespaces, err := kubernetes.FetchNamespaceWithOrganizationMap(ctx, o.k8sClient) if err != nil { return nil, fmt.Errorf("cannot list namespaces: %w", err) } @@ -171,13 +165,14 @@ func (o *ObjectStorage) fetchManagedBucketsAndNamespaces(ctx context.Context) ([ } func addOrgAndNamespaceToBucket(ctx context.Context, buckets exoscalev1.BucketList, namespaces map[string]string) []BucketDetail { - logger := ctrl.LoggerFrom(ctx) + logger := log.Logger(ctx) logger.V(1).Info("Gathering org and namespace from buckets") bucketDetails := make([]BucketDetail, 0, 10) for _, bucket := range buckets.Items { bucketDetail := BucketDetail{ BucketName: bucket.Spec.ForProvider.BucketName, + Zone: bucket.Spec.ForProvider.Zone, } if namespace, exist := bucket.ObjectMeta.Labels[namespaceLabel]; exist { organization, ok := namespaces[namespace] @@ -206,63 +201,17 @@ func addOrgAndNamespaceToBucket(ctx context.Context, buckets exoscalev1.BucketLi return bucketDetails } -// ensureBucketUsage saves the aggregated buckets usage by namespace to the billing database -// To save the correct data to the database the function also matches a relevant product, Discount (if any) and Query. 
-// The storage usage is referred to a day before the application ran (yesterday) -func (o *ObjectStorage) ensureBucketUsage(ctx context.Context, store *reporting.Store, aggregatedBucket Aggregated) error { - logger := ctrl.LoggerFrom(ctx) - - tokens, err := aggregatedBucket.DecodeKey() - if err != nil { - return fmt.Errorf("cannot decode key namespace-plan-dbtype - %s, organization %s, number of instances %f: %w", - aggregatedBucket.Key, - aggregatedBucket.Organization, - aggregatedBucket.Value, - err) - } - namespace := tokens[0] - - logger.Info("Saving buckets usage for namespace", "namespace", namespace, "storage used", aggregatedBucket.Value) - organization := aggregatedBucket.Organization - value := aggregatedBucket.Value - - sourceString := sosSourceString{ - ObjectType: exofixtures.SosType, - provider: exofixtures.Provider, +func CheckObjectStorageUOMExistence(mapping map[string]string) error { + if mapping[odoo.GBDay] == "" { + return fmt.Errorf("missing UOM mapping %s", odoo.GBDay) } - value, err = adjustStorageSizeUnit(value) - if err != nil { - return fmt.Errorf("adjustStorageSizeUnit(%v): %w", value, err) - } - - return store.WriteRecord(ctx, reporting.Record{ - TenantSource: organization, - CategorySource: exofixtures.Provider + ":" + namespace, - BillingDate: o.billingDate, - ProductSource: sourceString.getSourceString(), - DiscountSource: sourceString.getSourceString(), - QueryName: sourceString.getQuery(), - Value: value, - }) + return nil } func adjustStorageSizeUnit(value float64) (float64, error) { sosUnit := exofixtures.ObjectStorage.Query.Unit - if sosUnit == exofixtures.DefaultUnitSos { + if sosUnit == odoo.GBDay { return value / 1024 / 1024 / 1024, nil } return 0, fmt.Errorf("unknown Query unit %s", sosUnit) } - -type sosSourceString struct { - exofixtures.ObjectType - provider string -} - -func (ss sosSourceString) getQuery() string { - return strings.Join([]string{string(ss.ObjectType), ss.provider}, ":") -} - -func (ss sosSourceString) 
getSourceString() string { - return strings.Join([]string{string(ss.ObjectType), ss.provider}, ":") -} diff --git a/pkg/exoscale/objectstorage_integration_test.go b/pkg/exoscale/objectstorage_integration_test.go deleted file mode 100644 index 6153f11..0000000 --- a/pkg/exoscale/objectstorage_integration_test.go +++ /dev/null @@ -1,173 +0,0 @@ -//go:build integration - -package exoscale - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - egoscale "github.com/exoscale/egoscale/v2" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/suite" - "github.com/vshn/billing-collector-cloudservices/pkg/exofixtures" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" - "github.com/vshn/billing-collector-cloudservices/pkg/test" - exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type ObjectStorageTestSuite struct { - test.Suite - billingDate time.Time -} - -func (ts *ObjectStorageTestSuite) SetupSuite() { - exoscaleCRDPaths := os.Getenv("EXOSCALE_CRDS_PATH") - ts.Require().NotZero(exoscaleCRDPaths, "missing env variable EXOSCALE_CRDS_PATH") - - ts.SetupEnv([]string{exoscaleCRDPaths}) - - ts.RegisterScheme(exoscalev1.SchemeBuilder.AddToScheme) -} - -type objectStorageSource struct { - namespace string - tenant string - objectType exofixtures.ObjectType - billingDate time.Time -} - -func (ts *ObjectStorageTestSuite) TestMetrics() { - assert := ts.Assert() - ctx := ts.Context - - o, cancel := ts.setupObjectStorage() - defer cancel() - - expectedQuantities := map[string]float64{ - "example-project": 932.253897190094, - "next-big-thing": 0, - } - nameNsMap := map[string]string{ - "example-project-a": "example-project", - "example-project-b": "example-project", - "next-big-thing-a": "next-big-thing", - } - nsTenantMap := map[string]string{ - "example-project": "example-company", - 
"next-big-thing": "big-corporation", - } - ts.ensureBuckets(nameNsMap) - - for ns, tenant := range nsTenantMap { - ts.EnsureNS(ns, map[string]string{organizationLabel: tenant}) - } - - assert.NoError(o.Execute(ctx)) - - store, err := reporting.NewStore(ts.DatabaseURL, ts.Logger) - assert.NoError(err) - defer func() { - assert.NoError(store.Close()) - }() - - // a bit pointless to use a transaction for checking the results but I wanted to avoid exposing something - // which should not be used outside test code. - assert.NoError(store.WithTransaction(ctx, func(tx *sqlx.Tx) error { - dt, err := reporting.GetDateTime(ctx, tx, ts.billingDate) - if !assert.NoError(err) || !assert.NotZero(dt) { - return fmt.Errorf("no dateTime found(%q): %w (nil? %v)", ts.billingDate, err, dt) - } - - for ns, expectedQuantity := range expectedQuantities { - fact, err := ts.getFact(ctx, tx, ts.billingDate, dt, objectStorageSource{ - namespace: ns, - tenant: nsTenantMap[ns], - objectType: exofixtures.SosType, - billingDate: ts.billingDate, - }) - assert.NoError(err, ns) - - assert.NotNil(fact, ns) - assert.Equal(expectedQuantity, fact.Quantity, ns) - } - return nil - })) -} - -func (ts *ObjectStorageTestSuite) getFact(ctx context.Context, tx *sqlx.Tx, date time.Time, dt *db.DateTime, src objectStorageSource) (*db.Fact, error) { - sourceString := sosSourceString{ - ObjectType: src.objectType, - provider: exofixtures.Provider, - } - record := reporting.Record{ - TenantSource: src.tenant, - CategorySource: exofixtures.Provider + ":" + src.namespace, - BillingDate: date, - ProductSource: sourceString.getSourceString(), - DiscountSource: sourceString.getSourceString(), - QueryName: sourceString.getQuery(), - } - return test.FactByRecord(ctx, tx, dt, record) -} - -func (ts *ObjectStorageTestSuite) ensureBuckets(nameNsMap map[string]string) { - resources := make([]client.Object, 0) - for name, ns := range nameNsMap { - resources = append(resources, &exoscalev1.Bucket{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{namespaceLabel: ns}, - }, - Spec: exoscalev1.BucketSpec{ - ForProvider: exoscalev1.BucketParameters{BucketName: name}, - }, - }) - } - ts.EnsureResources(resources...) -} - -func (ts *ObjectStorageTestSuite) setupObjectStorage() (*ObjectStorage, func()) { - exoClient, cancel, err := newEgoscaleClient(ts.T()) - ts.Assert().NoError(err) - - ts.billingDate = time.Date(2023, 1, 11, 6, 0, 0, 0, time.UTC) - o, err := NewObjectStorage(exoClient, ts.Client, ts.DatabaseURL, ts.billingDate) - ts.Assert().NoError(err) - return o, cancel -} - -func newEgoscaleClient(t *testing.T) (*egoscale.Client, func(), error) { - httpClient, cancel, err := test.RequestRecorder(t, "testdata/exoscale/"+t.Name()) - if err != nil { - return nil, nil, fmt.Errorf("request recorder: %w", err) - } - - apiKey := os.Getenv("EXOSCALE_API_KEY") - secret := os.Getenv("EXOSCALE_API_SECRET") - if apiKey != "" && secret != "" { - t.Log("api key & secret set") - } else { - // override empty values since otherwise egoscale complains - apiKey = "NOTVALID" - secret = "NOTVALIDSECRET" - t.Log("api key or secret not set") - } - - exoClient, err := NewClientWithOptions(apiKey, secret, egoscale.ClientOptWithHTTPClient(httpClient)) - if err != nil { - return nil, nil, fmt.Errorf("new client: %w", err) - } - return exoClient, cancel, nil -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestObjectStorageTestSuite(t *testing.T) { - suite.Run(t, new(ObjectStorageTestSuite)) -} diff --git a/pkg/exoscale/objectstorage_test.go b/pkg/exoscale/objectstorage_test.go deleted file mode 100644 index 7f24fe3..0000000 --- a/pkg/exoscale/objectstorage_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package exoscale - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/exoscale/egoscale/v2/oapi" - "github.com/stretchr/testify/assert" - exoscalev1 
"github.com/vshn/provider-exoscale/apis/exoscale/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestObjectStorage_GetAggregated(t *testing.T) { - defaultKey := NewKey("default") - alphaKey := NewKey("alpha") - omegaKey := NewKey("omega") - - tests := map[string]struct { - givenSosBucketsUsage []oapi.SosBucketUsage - givenBucketDetails []BucketDetail - expectedAggregated map[Key]Aggregated - }{ - "GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectAggregatedObjects": { - givenSosBucketsUsage: []oapi.SosBucketUsage{ - createSosBucketUsage("bucket-test-1", 1), - createSosBucketUsage("bucket-test-2", 4), - createSosBucketUsage("bucket-test-3", 9), - createSosBucketUsage("bucket-test-4", 0), - createSosBucketUsage("bucket-test-5", 5), - }, - givenBucketDetails: []BucketDetail{ - createBucketDetail("bucket-test-1", "default", "orgA"), - createBucketDetail("bucket-test-2", "alpha", "orgB"), - createBucketDetail("bucket-test-3", "alpha", "orgB"), - createBucketDetail("bucket-test-4", "omega", "orgC"), - createBucketDetail("no-metrics-bucket", "beta", "orgD"), - }, - expectedAggregated: map[Key]Aggregated{ - defaultKey: createAggregated(defaultKey, "orgA", 1), - alphaKey: createAggregated(alphaKey, "orgB", 13), - omegaKey: createAggregated(omegaKey, "orgC", 0), - }, - }, - "GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectNoAggregatedObjects": { - givenSosBucketsUsage: []oapi.SosBucketUsage{ - createSosBucketUsage("bucket-test-1", 1), - createSosBucketUsage("bucket-test-2", 4), - }, - givenBucketDetails: []BucketDetail{ - createBucketDetail("bucket-test-3", "default", "orgA"), - createBucketDetail("bucket-test-4", "alpha", "orgB"), - createBucketDetail("bucket-test-5", "alpha", "orgB"), - }, - expectedAggregated: map[Key]Aggregated{}, - }, - "GivenSosBucketsUsageAndBuckets_WhenSosBucketsUsageEmpty_ThenExpectNoAggregatedObjects": { - givenSosBucketsUsage: []oapi.SosBucketUsage{ - createSosBucketUsage("bucket-test-1", 1), - 
createSosBucketUsage("bucket-test-2", 4), - }, - givenBucketDetails: []BucketDetail{}, - expectedAggregated: map[Key]Aggregated{}, - }, - "GivenSosBucketsUsageAndBuckets_WhenNoBuckets_ThenExpectNoAggregatedObjects": { - givenSosBucketsUsage: []oapi.SosBucketUsage{}, - givenBucketDetails: []BucketDetail{ - createBucketDetail("bucket-test-3", "default", "orgA"), - createBucketDetail("bucket-test-4", "alpha", "orgB"), - }, - expectedAggregated: map[Key]Aggregated{}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - // Given - ctx := context.Background() - - // When - aggregated := getAggregatedBuckets(ctx, tc.givenSosBucketsUsage, tc.givenBucketDetails) - - // Then - assert.True(t, reflect.DeepEqual(aggregated, tc.expectedAggregated)) - }) - } -} - -func TestObjectStorage_addOrgAndNamespaceToBucket(t *testing.T) { - tests := map[string]struct { - givenBucketList exoscalev1.BucketList - givenNamespaces map[string]string - expectedBucketDetails []BucketDetail - }{ - "GivenBucketListFromExoscale_WhenOrgAndNamespaces_ThenExpectBucketDetailObjects": { - givenBucketList: exoscalev1.BucketList{ - Items: []exoscalev1.Bucket{ - createBucket("bucket-1", "alpha", "orgA"), - createBucket("bucket-2", "beta", "orgB"), - createBucket("bucket-3", "alpha", "orgA"), - createBucket("bucket-4", "omega", "orgB"), - createBucket("bucket-5", "theta", "orgC"), - }, - }, - givenNamespaces: map[string]string{ - "alpha": "orgA", - "beta": "orgB", - "omega": "orgB", - "theta": "orgC", - }, - expectedBucketDetails: []BucketDetail{ - createBucketDetail("bucket-1", "alpha", "orgA"), - createBucketDetail("bucket-2", "beta", "orgB"), - createBucketDetail("bucket-3", "alpha", "orgA"), - createBucketDetail("bucket-4", "omega", "orgB"), - createBucketDetail("bucket-5", "theta", "orgC"), - }, - }, - "GivenBucketListFromExoscale_WhenNoOrgOrNamespaces_ThenExpectNoBucketDetailObjects": { - givenBucketList: exoscalev1.BucketList{ - Items: []exoscalev1.Bucket{ - 
createBucket("bucket-1", "", "orgA"), - createBucket("bucket-2", "beta", ""), - createBucket("bucket-3", "", ""), - }, - }, - givenNamespaces: map[string]string{}, - expectedBucketDetails: []BucketDetail{}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - // Given - ctx := context.Background() - - // When - bucketDetails := addOrgAndNamespaceToBucket(ctx, tc.givenBucketList, tc.givenNamespaces) - - // Then - assert.ElementsMatch(t, tc.expectedBucketDetails, bucketDetails) - }) - } -} - -func createBucket(name, namespace, organization string) exoscalev1.Bucket { - labels := make(map[string]string) - if namespace != "" { - labels[namespaceLabel] = namespace - } - if organization != "" { - labels[organizationLabel] = organization - } - return exoscalev1.Bucket{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - }, - Spec: exoscalev1.BucketSpec{ - ForProvider: exoscalev1.BucketParameters{ - BucketName: name, - }, - }, - } -} - -func createAggregated(key Key, organization string, size float64) Aggregated { - return Aggregated{ - Key: key, - Organization: organization, - Value: size, - } -} - -func createBucketDetail(bucketName, namespace, organization string) BucketDetail { - return BucketDetail{ - Organization: organization, - BucketName: bucketName, - Namespace: namespace, - } -} - -func createSosBucketUsage(bucketName string, size int) oapi.SosBucketUsage { - date := time.Now() - actualSize := int64(size) - zone := oapi.ZoneName("ch-gva-2") - return oapi.SosBucketUsage{ - CreatedAt: &date, - Name: &bucketName, - Size: &actualSize, - ZoneName: &zone, - } -} diff --git a/pkg/exoscale/testdata/exoscale/TestObjectStorageTestSuite/TestMetrics.yaml b/pkg/exoscale/testdata/exoscale/TestObjectStorageTestSuite/TestMetrics.yaml index 7f8c801..f677968 100644 --- a/pkg/exoscale/testdata/exoscale/TestObjectStorageTestSuite/TestMetrics.yaml +++ 
b/pkg/exoscale/testdata/exoscale/TestObjectStorageTestSuite/TestMetrics.yaml @@ -74,3 +74,76 @@ interactions: status: 200 OK code: 200 duration: 121.733547ms + - id: 1 + request: + proto: HTTP/1.1 + proto_major: 1 + proto_minor: 1 + content_length: 0 + transfer_encoding: [] + trailer: {} + host: api-ch-gva-2.exoscale.com + remote_addr: "" + request_uri: "" + body: "" + form: {} + headers: + User-Agent: + - egoscale/0.90.1 (go1.19.4; linux/amd64) + url: https://api-ch-gva-2.exoscale.com/v2/sos-buckets-usage + method: GET + response: + proto: HTTP/2.0 + proto_major: 2 + proto_minor: 0 + transfer_encoding: [] + trailer: {} + content_length: -1 + uncompressed: false + body: | + { + "sos-buckets-usage": [ + { + "name": "example-project-a", + "created-at": "2023-01-01T09:42:00+00:00", + "zone-name": "ch-gva-2", + "size": 1000000000000 + }, + { + "name": "example-project-b", + "created-at": "2023-01-01T09:42:00+00:00", + "zone-name": "ch-gva-2", + "size": 1000000000 + }, + { + "name": "next-big-thing-a", + "created-at": "2023-01-01T09:42:00+00:00", + "zone-name": "ch-gva-2", + "size": 0 + }, + { + "name": "not-mapped", + "created-at": "2023-01-01T09:42:00+00:00", + "zone-name": "ch-gva-2", + "size": 12345567889 + } + ] + } + headers: + Content-Type: + - application/json; charset=utf-8 + Date: + - Mon, 16 Jan 2023 12:32:10 GMT + Exo-Cm-Hash: + - fa090b3af8b19f68b7ffe515ecedefef + Referrer-Policy: + - no-referrer-when-downgrade + Strict-Transport-Security: + - max-age=31557600; includeSubDomains; preload + X-Content-Type-Options: + - nosniff always + X-Xss-Protection: + - 1; mode=block always + status: 200 OK + code: 200 + duration: 121.733547ms diff --git a/pkg/kubernetes/client.go b/pkg/kubernetes/client.go index 26ddca1..1366f21 100644 --- a/pkg/kubernetes/client.go +++ b/pkg/kubernetes/client.go @@ -1,8 +1,15 @@ package kubernetes import ( + "context" "fmt" + orgv1 "github.com/appuio/control-api/apis/organization/v1" + 
"github.com/vshn/billing-collector-cloudservices/pkg/log" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + cloudscaleapis "github.com/vshn/provider-cloudscale/apis" exoapis "github.com/vshn/provider-exoscale/apis" corev1 "k8s.io/api/core/v1" @@ -12,6 +19,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + // OrganizationLabel represents the label used for organization when fetching the metrics + OrganizationLabel = "appuio.io/organization" +) + // NewClient creates a k8s client from the server url and token url // If kubeconfig (path to it) is supplied, that takes precedence. Its use is mainly for local development // since local clusters usually don't have a valid certificate. @@ -26,20 +38,34 @@ func NewClient(kubeconfig, url, token string) (client.Client, error) { if err := cloudscaleapis.AddToScheme(scheme); err != nil { return nil, fmt.Errorf("cloudscale scheme: %w", err) } + if err := orgv1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("control api org scheme: %w", err) + } + var c client.Client + var err error config, err := restConfig(kubeconfig, url, token) if err != nil { - return nil, fmt.Errorf("k8s rest config: %w", err) + return nil, fmt.Errorf("cannot initialize k8s client: %w", err) + } + if kubeconfig != "" || (url != "" && token != "") { + c, err = client.New(config, client.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, fmt.Errorf("cannot create new k8s client: %w", err) + } + } else { + c, err = client.New(ctrl.GetConfigOrDie(), client.Options{ + Scheme: scheme, + }) } - - c, err := client.New(config, client.Options{ - Scheme: scheme, - }) if err != nil { return nil, fmt.Errorf("cannot initialize k8s client: %w", err) } return c, nil + } func restConfig(kubeconfig string, url string, token string) (*rest.Config, error) { @@ -49,3 +75,31 @@ func restConfig(kubeconfig string, url string, token string) (*rest.Config, 
erro } return &rest.Config{Host: url, BearerToken: token}, nil } + +func FetchNamespaceWithOrganizationMap(ctx context.Context, k8sClient client.Client) (map[string]string, error) { + logger := log.Logger(ctx) + + gvk := schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NamespaceList", + } + list := &metav1.PartialObjectMetadataList{} + list.SetGroupVersionKind(gvk) + + err := k8sClient.List(ctx, list) + if err != nil { + return nil, fmt.Errorf("cannot get namespace list: %w", err) + } + + namespaces := map[string]string{} + for _, ns := range list.Items { + orgLabel, ok := ns.GetLabels()[OrganizationLabel] + if !ok { + logger.Info("Organization label not found in namespace", "namespace", ns.GetName()) + continue + } + namespaces[ns.GetName()] = orgLabel + } + return namespaces, nil +} diff --git a/pkg/odoo/odoo.go b/pkg/odoo/odoo.go new file mode 100644 index 0000000..a8015b0 --- /dev/null +++ b/pkg/odoo/odoo.go @@ -0,0 +1,101 @@ +package odoo + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/go-logr/logr" + "golang.org/x/oauth2/clientcredentials" +) + +const ( + GB = "GB" + GBDay = "GBDay" + KReq = "KReq" + InstanceHour = "InstanceHour" +) + +type OdooAPIClient struct { + odooURL string + logger logr.Logger + oauthClient *http.Client +} + +type apiObject struct { + Data []OdooMeteredBillingRecord `json:"data"` +} + +type OdooMeteredBillingRecord struct { + ProductID string `json:"product_id"` + InstanceID string `json:"instance_id"` + ItemDescription string `json:"item_description,omitempty"` + ItemGroupDescription string `json:"item_group_description,omitempty"` + SalesOrder string `json:"sales_order_id"` + UnitID string `json:"unit_id"` + ConsumedUnits float64 `json:"consumed_units"` + TimeRange TimeRange `json:"timerange"` +} + +type TimeRange struct { + From time.Time + To time.Time +} + +func (t TimeRange) MarshalJSON() ([]byte, error) { + return []byte(`"` + 
t.From.Format(time.RFC3339) + "/" + t.To.Format(time.RFC3339) + `"`), nil +} + +func (t *TimeRange) UnmarshalJSON([]byte) error { + return errors.New("Not implemented") +} + +func NewOdooAPIClient(ctx context.Context, odooURL string, oauthTokenURL string, oauthClientId string, oauthClientSecret string, logger logr.Logger) *OdooAPIClient { + oauthConfig := clientcredentials.Config{ + ClientID: oauthClientId, + ClientSecret: oauthClientSecret, + TokenURL: oauthTokenURL, + } + oauthClient := oauthConfig.Client(ctx) + return &OdooAPIClient{ + odooURL: odooURL, + logger: logger, + oauthClient: oauthClient, + } +} + +func (c OdooAPIClient) SendData(data []OdooMeteredBillingRecord) error { + apiObject := apiObject{ + Data: data, + } + str, err := json.Marshal(apiObject) + if err != nil { + return err + } + resp, err := c.oauthClient.Post(c.odooURL, "application/json", bytes.NewBuffer(str)) + if err != nil { + return err + } + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + c.logger.Info("Records sent to Odoo API", "status", resp.Status, "body", string(body), "numberOfRecords", len(data)) + + if resp.StatusCode != 200 { + return errors.New(fmt.Sprintf("API error when sending records to Odoo:\n%s", body)) + } + + return nil +} + +func LoadUOM(uom string) (m map[string]string, err error) { + err = json.Unmarshal([]byte(uom), &m) + if err != nil || len(m) == 0 { + return nil, fmt.Errorf("no unit of measure found: %v", err) + } + return m, nil +} diff --git a/pkg/reporting/categories.go b/pkg/reporting/categories.go deleted file mode 100644 index 99a36b3..0000000 --- a/pkg/reporting/categories.go +++ /dev/null @@ -1,42 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" -) - -func GetCategory(ctx context.Context, tx *sqlx.Tx, source string) (*db.Category, error) { - var categories []db.Category - err := sqlx.SelectContext(ctx, tx, &categories, `SELECT categories.* FROM 
categories WHERE source = $1`, source) - if err != nil { - return nil, fmt.Errorf("cannot get categories by source %s: %w", source, err) - } - if len(categories) == 0 { - return nil, nil - } - return &categories[0], nil -} - -func EnsureCategory(ctx context.Context, tx *sqlx.Tx, cat *db.Category) (*db.Category, error) { - category, err := GetCategory(ctx, tx, cat.Source) - if err != nil { - return nil, err - } - if category == nil { - return createCategory(tx, cat) - } - return category, nil -} - -func createCategory(p db.NamedPreparer, in *db.Category) (*db.Category, error) { - var category db.Category - err := db.GetNamed(p, &category, - "INSERT INTO categories (source,target) VALUES (:source,:target) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create category %v: %w", in, err) - } - return &category, err -} diff --git a/pkg/reporting/dateTimes.go b/pkg/reporting/dateTimes.go deleted file mode 100644 index 6e2eb80..0000000 --- a/pkg/reporting/dateTimes.go +++ /dev/null @@ -1,54 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" -) - -func GetDateTime(ctx context.Context, tx *sqlx.Tx, timestamp time.Time) (*db.DateTime, error) { - var dateTimes []db.DateTime - err := sqlx.SelectContext(ctx, tx, &dateTimes, `SELECT date_times.* FROM date_times WHERE timestamp = $1`, timestamp) - if err != nil { - return nil, fmt.Errorf("cannot get timestamps by timestamp %s: %w", timestamp, err) - } - if len(dateTimes) == 0 { - return nil, nil - } - return &dateTimes[0], nil -} - -func EnsureDateTime(ctx context.Context, tx *sqlx.Tx, dt *db.DateTime) (*db.DateTime, error) { - dateTime, err := GetDateTime(ctx, tx, dt.Timestamp) - if err != nil { - return nil, err - } - if dateTime == nil { - return createDateTime(tx, dt) - } - return dateTime, nil -} - -func createDateTime(p db.NamedPreparer, in *db.DateTime) (*db.DateTime, error) { - var dateTime db.DateTime - err 
:= db.GetNamed(p, &dateTime, - "INSERT INTO date_times (timestamp, year, month, day, hour) VALUES (:timestamp, :year, :month, :day, :hour) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create datetime %v: %w", in, err) - } - return &dateTime, err -} - -func NewDateTime(timestamp time.Time) *db.DateTime { - timestamp = timestamp.In(time.UTC) - return &db.DateTime{ - Timestamp: timestamp, - Year: timestamp.Year(), - Month: int(timestamp.Month()), - Day: timestamp.Day(), - Hour: timestamp.Hour(), - } -} diff --git a/pkg/reporting/discounts.go b/pkg/reporting/discounts.go deleted file mode 100644 index 18aff13..0000000 --- a/pkg/reporting/discounts.go +++ /dev/null @@ -1,105 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" - ctrl "sigs.k8s.io/controller-runtime" -) - -func fetchDiscount(ctx context.Context, tx *sqlx.Tx, source string) (*db.Discount, error) { - var discounts []db.Discount - err := sqlx.SelectContext(ctx, tx, &discounts, `SELECT discounts.* FROM discounts WHERE source = $1`, source) - if err != nil { - return nil, fmt.Errorf("cannot get discounts by source %s: %w", source, err) - } - if len(discounts) == 0 { - return nil, nil - } - return &discounts[0], nil -} - -func fetchDiscountBySourceQueryAndTime(ctx context.Context, tx *sqlx.Tx, sourceQuery string, timestamp time.Time) ([]db.Discount, error) { - var discounts []db.Discount - err := sqlx.SelectContext(ctx, tx, &discounts, - `SELECT discounts.* FROM discounts - WHERE (source = $1 OR source LIKE $2) - AND during @> $3::timestamptz`, - sourceQuery, sourceQuery+":%", timestamp) - if err != nil { - return nil, fmt.Errorf("cannot get discounts by sourceQuery %s and timestamp %s: %w", sourceQuery, timestamp, err) - } - return discounts, nil -} - -func GetBestMatchingDiscount(ctx context.Context, tx *sqlx.Tx, source string, timestamp time.Time) (*db.Discount, error) { - 
tokenizedSource := NewTokenizedSource(source) - candidateDiscounts, err := fetchDiscountBySourceQueryAndTime(ctx, tx, tokenizedSource.Tokens[0], timestamp) - if err != nil { - return nil, err - } - - candidateSourcePatterns := make([]*TokenizedSource, len(candidateDiscounts)) - for i, candidateDiscount := range candidateDiscounts { - candidateSourcePatterns[i] = NewTokenizedSource(candidateDiscount.Source) - } - - match := FindBestMatchingTokenizedSource(tokenizedSource, candidateSourcePatterns) - - for _, candidateDiscount := range candidateDiscounts { - if candidateDiscount.Source == match.String() { - return &candidateDiscount, nil - } - } - - return nil, nil -} - -func EnsureDiscount(ctx context.Context, tx *sqlx.Tx, ensureDiscount *db.Discount) (*db.Discount, error) { - logger := ctrl.LoggerFrom(ctx) - - discount, err := fetchDiscount(ctx, tx, ensureDiscount.Source) - if err != nil { - return nil, err - } - if discount == nil { - discount, err = createDiscount(tx, ensureDiscount) - if err != nil { - return nil, err - } - } else { - ensureDiscount.Id = discount.Id - if !reflect.DeepEqual(discount, ensureDiscount) { - logger.Info("updating discount", "id", discount.Id) - err = updateDiscount(tx, ensureDiscount) - if err != nil { - return nil, err - } - } - } - return discount, nil -} - -func createDiscount(p db.NamedPreparer, in *db.Discount) (*db.Discount, error) { - var discount db.Discount - err := db.GetNamed(p, &discount, - "INSERT INTO discounts (source,discount,during) VALUES (:source,:discount,:during) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create discount %v: %w", in, err) - } - return &discount, err -} - -func updateDiscount(p db.NamedPreparer, in *db.Discount) error { - var discount db.Discount - err := db.GetNamed(p, &discount, - "UPDATE discounts SET source=:source, discount=:target, during=:during WHERE id=:id RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot update discount %v: %w", in, err) - } - return err -} 
diff --git a/pkg/reporting/facts.go b/pkg/reporting/facts.go deleted file mode 100644 index efb48d5..0000000 --- a/pkg/reporting/facts.go +++ /dev/null @@ -1,82 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - "reflect" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" - ctrl "sigs.k8s.io/controller-runtime" -) - -func GetByFact(ctx context.Context, tx *sqlx.Tx, fact *db.Fact) (*db.Fact, error) { - var facts []db.Fact - err := sqlx.SelectContext(ctx, tx, &facts, - `SELECT facts.* FROM facts WHERE date_time_id = $1 AND query_id = $2 AND tenant_id = $3 AND category_id = $4 AND product_id = $5 AND discount_id = $6`, - fact.DateTimeId, fact.QueryId, fact.TenantId, fact.CategoryId, fact.ProductId, fact.DiscountId) - if err != nil { - return nil, fmt.Errorf("cannot get facts by fact %v: %w", fact, err) - } - if len(facts) == 0 { - return nil, nil - } - return &facts[0], nil -} - -func EnsureFact(ctx context.Context, tx *sqlx.Tx, ensureFact *db.Fact) (*db.Fact, error) { - logger := ctrl.LoggerFrom(ctx) - - fact, err := GetByFact(ctx, tx, ensureFact) - if err != nil { - return nil, err - } - if fact == nil { - fact, err = createFact(tx, ensureFact) - if err != nil { - return nil, err - } - } else { - ensureFact.Id = fact.Id - if !reflect.DeepEqual(fact, ensureFact) { - logger.Info("updating fact", "id", fact.Id) - err = updateFact(tx, ensureFact) - if err != nil { - return nil, err - } - } - } - return fact, nil -} - -func createFact(p db.NamedPreparer, in *db.Fact) (*db.Fact, error) { - var category db.Fact - err := db.GetNamed(p, &category, - "INSERT INTO facts (date_time_id, query_id, tenant_id, category_id, product_id, discount_id, quantity) VALUES (:date_time_id, :query_id, :tenant_id, :category_id, :product_id, :discount_id, :quantity) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create fact %v: %w", in, err) - } - return &category, err -} - -func updateFact(p db.NamedPreparer, in *db.Fact) error { - var 
fact db.Fact - err := db.GetNamed(p, &fact, - "UPDATE facts SET date_time_id=:date_time_id, query_id=:query_id, tenant_id=:tenant_id, category_id=:category_id, product_id=:product_id, discount_id=:discount_id, quantity=:quantity WHERE id=:id RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot update fact %v: %w", in, err) - } - return err -} - -func NewFact(dateTime *db.DateTime, query *db.Query, tenant *db.Tenant, category *db.Category, product *db.Product, discount *db.Discount, quanity float64) *db.Fact { - return &db.Fact{ - DateTimeId: dateTime.Id, - QueryId: query.Id, - TenantId: tenant.Id, - CategoryId: category.Id, - ProductId: product.Id, - DiscountId: discount.Id, - Quantity: quanity, - } -} diff --git a/pkg/reporting/products.go b/pkg/reporting/products.go deleted file mode 100644 index f446e82..0000000 --- a/pkg/reporting/products.go +++ /dev/null @@ -1,105 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" - ctrl "sigs.k8s.io/controller-runtime" -) - -func getProductBySource(ctx context.Context, tx *sqlx.Tx, source string) (*db.Product, error) { - var products []db.Product - err := sqlx.SelectContext(ctx, tx, &products, `SELECT products.* FROM products WHERE source = $1`, source) - if err != nil { - return nil, err - } - if len(products) == 0 { - return nil, nil - } - return &products[0], nil -} - -func getProductBySourceQueryAndTime(ctx context.Context, tx *sqlx.Tx, sourceQuery string, timestamp time.Time) ([]db.Product, error) { - var products []db.Product - err := sqlx.SelectContext(ctx, tx, &products, - `SELECT products.* FROM products - WHERE (source = $1 OR source LIKE $2) - AND during @> $3::timestamptz`, - sourceQuery, sourceQuery+":%", timestamp) - if err != nil { - return nil, fmt.Errorf("cannot get products by sourceQuery %s and timestamp %s: %w", sourceQuery, timestamp, err) - } - return products, nil -} - -func 
GetBestMatchingProduct(ctx context.Context, tx *sqlx.Tx, source string, timestamp time.Time) (*db.Product, error) { - tokenizedSource := NewTokenizedSource(source) - candidateProducts, err := getProductBySourceQueryAndTime(ctx, tx, tokenizedSource.Tokens[0], timestamp) - if err != nil { - return nil, err - } - - candidateSourcePatterns := make([]*TokenizedSource, len(candidateProducts)) - for i, candidateProduct := range candidateProducts { - candidateSourcePatterns[i] = NewTokenizedSource(candidateProduct.Source) - } - - match := FindBestMatchingTokenizedSource(tokenizedSource, candidateSourcePatterns) - - for _, candidateProduct := range candidateProducts { - if candidateProduct.Source == match.String() { - return &candidateProduct, nil - } - } - - return nil, nil -} - -func EnsureProduct(ctx context.Context, tx *sqlx.Tx, ensureProduct *db.Product) (*db.Product, error) { - logger := ctrl.LoggerFrom(ctx) - - product, err := getProductBySource(ctx, tx, ensureProduct.Source) - if err != nil { - return nil, err - } - if product == nil { - product, err = createProduct(tx, ensureProduct) - if err != nil { - return nil, err - } - } else { - ensureProduct.Id = product.Id - if !reflect.DeepEqual(product, ensureProduct) { - logger.Info("updating product", "id", product.Id) - err = updateProduct(tx, ensureProduct) - if err != nil { - return nil, err - } - } - } - return product, nil -} - -func createProduct(p db.NamedPreparer, in *db.Product) (*db.Product, error) { - var product db.Product - err := db.GetNamed(p, &product, - "INSERT INTO products (source,target,amount,unit,during) VALUES (:source,:target,:amount,:unit,:during) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create product %v: %w", in, err) - } - return &product, err -} - -func updateProduct(p db.NamedPreparer, in *db.Product) error { - var product db.Product - err := db.GetNamed(p, &product, - "UPDATE products SET source=:source, target=:target, amount=:amount, unit=:unit, during=:during 
WHERE id=:id RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot update product %v: %w", in, err) - } - return err -} diff --git a/pkg/reporting/queries.go b/pkg/reporting/queries.go deleted file mode 100644 index 9380b87..0000000 --- a/pkg/reporting/queries.go +++ /dev/null @@ -1,68 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - "reflect" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" - ctrl "sigs.k8s.io/controller-runtime" -) - -func GetQueryByName(ctx context.Context, tx *sqlx.Tx, name string) (*db.Query, error) { - var queries []db.Query - err := sqlx.SelectContext(ctx, tx, &queries, `SELECT queries.* FROM queries WHERE name = $1`, name) - if err != nil { - return nil, fmt.Errorf("cannot get queries by name %s: %w", name, err) - } - if len(queries) == 0 { - return nil, nil - } - return &queries[0], nil -} - -func EnsureQuery(ctx context.Context, tx *sqlx.Tx, ensureQuery *db.Query) (*db.Query, error) { - logger := ctrl.LoggerFrom(ctx) - - query, err := GetQueryByName(ctx, tx, ensureQuery.Name) - if err != nil { - return nil, err - } - if query == nil { - query, err = createQuery(tx, ensureQuery) - if err != nil { - return nil, err - } - } else { - ensureQuery.Id = query.Id - if !reflect.DeepEqual(query, ensureQuery) { - logger.Info("updating query", "id", query.Id) - err = updateQuery(tx, ensureQuery) - if err != nil { - return nil, err - } - } - } - return query, nil -} - -func createQuery(p db.NamedPreparer, in *db.Query) (*db.Query, error) { - var query db.Query - err := db.GetNamed(p, &query, - "INSERT INTO queries (parent_id, name, description, query, unit, during) VALUES (:parent_id, :name, :description, :query, :unit, :during) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create query %v: %w", in, err) - } - return &query, err -} - -func updateQuery(p db.NamedPreparer, in *db.Query) error { - var query db.Query - err := db.GetNamed(p, &query, - "UPDATE queries SET name=:name, 
description=:description, query=:query, unit=:unit, during=:during, parent_id=:parent_id WHERE id=:id RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot update query %v: %w", in, err) - } - return err -} diff --git a/pkg/reporting/store.go b/pkg/reporting/store.go deleted file mode 100644 index 71dc73d..0000000 --- a/pkg/reporting/store.go +++ /dev/null @@ -1,168 +0,0 @@ -package reporting - -import ( - "context" - "database/sql" - "errors" - "fmt" - "time" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/go-logr/logr" - "github.com/jmoiron/sqlx" - ctrl "sigs.k8s.io/controller-runtime" -) - -type Store struct { - db *sqlx.DB - logger logr.Logger -} - -// NewStore opens up a db connection to the specified db. -func NewStore(url string, logger logr.Logger) (*Store, error) { - rdb, err := db.Openx(url) - if err != nil { - return nil, fmt.Errorf("newReporting: open db failed: %w", err) - } - - return &Store{ - db: rdb, - logger: logger, - }, nil -} - -// Close the db connection. -func (r *Store) Close() error { - return r.db.Close() -} - -// WithTransaction runs fn within a transaction and does commit/rollback when necessary. 
-func (r *Store) WithTransaction(ctx context.Context, fn func(*sqlx.Tx) error) error { - tx, err := r.db.BeginTxx(ctx, &sql.TxOptions{}) - if err != nil { - return fmt.Errorf("beginTransaction: transaction failed: %w", err) - } - - defer func() { - if p := recover(); p != nil { - // a panic occurred, rollback and repanic - err := tx.Rollback() - if err != nil && !errors.Is(err, sql.ErrTxDone) { - r.logger.Error(err, "unable to rollback after panic") - } - panic(p) - } else if err != nil { - // something went wrong, rollback - err := tx.Rollback() - if err != nil && !errors.Is(err, sql.ErrTxDone) { - r.logger.Error(err, "unable to rollback after error") - } - } - }() - - if err := fn(tx); err != nil { - return err - } - - if err := tx.Commit(); err != nil { - return fmt.Errorf("tx: unable to commit: %w", err) - } - return nil -} - -// Initialize uses a transaction to ensure given entities in reporting store. -func (r *Store) Initialize(ctx context.Context, products []*db.Product, discounts []*db.Discount, queries []*db.Query) error { - err := r.WithTransaction(ctx, func(tx *sqlx.Tx) error { - for _, product := range products { - _, err := EnsureProduct(ctx, tx, product) - if err != nil { - return fmt.Errorf("product ensure: %w", err) - } - } - for _, discount := range discounts { - _, err := EnsureDiscount(ctx, tx, discount) - if err != nil { - return fmt.Errorf("discount ensure: %w", err) - } - } - for _, query := range queries { - _, err := EnsureQuery(ctx, tx, query) - if err != nil { - return fmt.Errorf("query ensure: %w", err) - } - } - return nil - }) - if err != nil { - return fmt.Errorf("initialize: %w", err) - } - return nil -} - -type Record struct { - TenantSource string - CategorySource string - BillingDate time.Time - ProductSource string - DiscountSource string - QueryName string - Value float64 -} - -func (r *Store) WriteRecord(ctx context.Context, record Record) error { - return r.WithTransaction(ctx, func(tx *sqlx.Tx) error { - tenant, err := 
EnsureTenant(ctx, tx, &db.Tenant{Source: record.TenantSource}) - if err != nil { - return fmt.Errorf("EnsureTenant(%q): %w", record.TenantSource, err) - } - - category, err := EnsureCategory(ctx, tx, &db.Category{Source: record.CategorySource}) - if err != nil { - return fmt.Errorf("EnsureCategory(%q): %w", record.CategorySource, err) - } - - dateTime, err := EnsureDateTime(ctx, tx, NewDateTime(record.BillingDate)) - if err != nil { - return fmt.Errorf("EnsureDateTime(%q): %w", record.BillingDate, err) - } - - product, err := GetBestMatchingProduct(ctx, tx, record.ProductSource, record.BillingDate) - if err != nil { - return fmt.Errorf("GetBestMatchingProduct(%q, %q): %w", record.ProductSource, record.BillingDate, err) - } - - discount, err := GetBestMatchingDiscount(ctx, tx, record.DiscountSource, record.BillingDate) - if err != nil { - return fmt.Errorf("GetBestMatchingDiscount(%q, %q): %w", record.DiscountSource, record.BillingDate, err) - } - - query, err := GetQueryByName(ctx, tx, record.QueryName) - if err != nil { - return fmt.Errorf("GetQueryByName(%q): %w", record.QueryName, err) - } - - fact := NewFact(dateTime, query, tenant, category, product, discount, record.Value) - if !isFactUpdatable(ctx, tx, fact, record.Value) { - return nil - } - - _, err = EnsureFact(ctx, tx, fact) - if err != nil { - return fmt.Errorf("EnsureFact: %w", err) - } - return nil - }) -} - -// isFactUpdatable makes sure that only missing data or higher quantity values are saved in the billing database -func isFactUpdatable(ctx context.Context, tx *sqlx.Tx, f *db.Fact, value float64) bool { - logger := ctrl.LoggerFrom(ctx) - - fact, _ := GetByFact(ctx, tx, f) - if fact == nil || fact.Quantity < value { - return true - } - logger.Info(fmt.Sprintf("skipped saving, higher or equal number already recorded in DB "+ - "for this hour: saved: \"%v\", new: \"%v\"", fact.Quantity, value)) - return false -} diff --git a/pkg/reporting/tenants.go b/pkg/reporting/tenants.go deleted file mode 
100644 index 7a50a3c..0000000 --- a/pkg/reporting/tenants.go +++ /dev/null @@ -1,45 +0,0 @@ -package reporting - -import ( - "context" - "fmt" - - "github.com/appuio/appuio-cloud-reporting/pkg/db" - "github.com/jmoiron/sqlx" -) - -func GetTenantBySource(ctx context.Context, tx *sqlx.Tx, source string) (*db.Tenant, error) { - var tenants []db.Tenant - err := sqlx.SelectContext(ctx, tx, &tenants, `SELECT tenants.* FROM tenants WHERE source = $1 limit 1`, source) - if err != nil { - return nil, fmt.Errorf("cannot get tenants by source %s: %w", source, err) - } - if len(tenants) == 0 { - return nil, nil - } - return &tenants[0], nil -} - -func EnsureTenant(ctx context.Context, tx *sqlx.Tx, ensureTenant *db.Tenant) (*db.Tenant, error) { - tenant, err := GetTenantBySource(ctx, tx, ensureTenant.Source) - if err != nil { - return nil, err - } - if tenant == nil { - tenant, err = createTenant(tx, ensureTenant) - if err != nil { - return nil, err - } - } - return tenant, nil -} - -func createTenant(p db.NamedPreparer, in *db.Tenant) (*db.Tenant, error) { - var tenant db.Tenant - err := db.GetNamed(p, &tenant, - "INSERT INTO tenants (source,target) VALUES (:source,:target) RETURNING *", in) - if err != nil { - err = fmt.Errorf("cannot create tenant %v: %w", in, err) - } - return &tenant, err -} diff --git a/pkg/reporting/tokenizer.go b/pkg/reporting/tokenizer.go deleted file mode 100644 index 357c91d..0000000 --- a/pkg/reporting/tokenizer.go +++ /dev/null @@ -1,75 +0,0 @@ -package reporting - -import "strings" - -// Does not do much, we mostly have this to make the code more readable (better than 2D string arrays) -type TokenizedSource struct { - Tokens []string -} - -func (ts TokenizedSource) String() string { - return strings.Join(ts.Tokens, ":") -} - -func (ts *TokenizedSource) Equals(other *TokenizedSource) bool { - if ts == nil && other == nil { - return true - } - if ts == nil || other == nil { - return false - } - return ts.String() == other.String() -} - -func 
NewTokenizedSource(source string) *TokenizedSource { - return &TokenizedSource{ - Tokens: strings.Split(source, ":"), - } -} - -func generatePatterns(reference *TokenizedSource) []*TokenizedSource { - if len(reference.Tokens) > 10 { - panic("No more than 10 tokens supported. More tokens lead to an explosion of possible wildcard positions") - } - - var patterns []*TokenizedSource - - // start with all the tokens and use one less with every iteration - for i := len(reference.Tokens); i > 0; i-- { - limitedTokens := reference.Tokens[0:i] - - patterns = append(patterns, &TokenizedSource{Tokens: limitedTokens}) - if len(limitedTokens) > 2 { - // we're setting the wildcards as if we were counting. 'j' is our counter, starting at 1 (one single - // wildcard at the rightmost allowed position) - for j := 1; j < (1 << (len(limitedTokens) - 2)); j++ { - // create copy of limitedTokens. We can't modify limitedTokens directly. - var wildcardedTokens []string - wildcardedTokens = append(wildcardedTokens, limitedTokens...) - - // 'p' goes through all the bits of 'j' and checks if they are set. If yes, it places a wildcard. 
- for p := 0; p < len(wildcardedTokens)-2; p++ { - if j&(1< 0 { - wildcardedTokens[len(wildcardedTokens)-2-p] = "*" - } - } - patterns = append(patterns, &TokenizedSource{Tokens: wildcardedTokens}) - } - } - } - - return patterns -} - -func FindBestMatchingTokenizedSource(reference *TokenizedSource, candidates []*TokenizedSource) *TokenizedSource { - patterns := generatePatterns(reference) - for _, pattern := range patterns { - for _, candidate := range candidates { - if candidate.String() == pattern.String() { - return candidate - } - } - } - - return nil -} diff --git a/pkg/reporting/tokenizer_test.go b/pkg/reporting/tokenizer_test.go deleted file mode 100644 index 3d78195..0000000 --- a/pkg/reporting/tokenizer_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package reporting - -import ( - "testing" -) - -func testBestMatch(t *testing.T, reference string, candidates []string, requiredResult *TokenizedSource) { - referenceTS := NewTokenizedSource(reference) - candidatesTS := make([]*TokenizedSource, len(candidates)) - for i, candidate := range candidates { - candidatesTS[i] = NewTokenizedSource(candidate) - } - bestMatch := FindBestMatchingTokenizedSource(referenceTS, candidatesTS) - if !requiredResult.Equals(bestMatch) { - t.Errorf("best Match should have been '%s', was '%s'", requiredResult, bestMatch) - } -} - -func Test(t *testing.T) { - testBestMatch(t, "a:b:c:d", []string{"a", "a:b", "a:*:c"}, NewTokenizedSource("a:*:c")) - testBestMatch(t, "a:b:c:d", []string{"a", "a:x", "a:*:y"}, NewTokenizedSource("a")) - testBestMatch(t, "a:b:c:d", []string{"a", "a:b"}, NewTokenizedSource("a:b")) - testBestMatch(t, "a:b:c:d", []string{"x", "x:y"}, nil) - testBestMatch(t, "a:b:c:d", []string{}, nil) - testBestMatch(t, "a:b:c:d", []string{"a:b:c:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:c", "a:b:c:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:c", "a:b:c:d", "a:b:*:d"}, NewTokenizedSource("a:b:c:d")) - 
testBestMatch(t, "a:b:c:d", []string{"a:b:*:d", "a:b:c", "a:b:c:d", "a:b:*:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:*:d", "a:b:c", "a:b:c:d", "a:b:*:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:*:d", "a:*:c:d"}, NewTokenizedSource("a:b:*:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:c:d", "a:b:*:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:*:d", "a:b:c:d"}, NewTokenizedSource("a:b:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:*:d", "a:*:c:d"}, NewTokenizedSource("a:b:*:d")) - testBestMatch(t, "a:b:c:d", []string{"a:*:c:d", "a:b:*:d"}, NewTokenizedSource("a:b:*:d")) - testBestMatch(t, "a:b:c:d", []string{"a:*:c:d", "a:*:*:d"}, NewTokenizedSource("a:*:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:*:*:d", "a:*:c:d"}, NewTokenizedSource("a:*:c:d")) - testBestMatch(t, "a:b:c:d", []string{"a:*:*:d", "a:b:c"}, NewTokenizedSource("a:*:*:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:c", "a:*:*:d"}, NewTokenizedSource("a:*:*:d")) - testBestMatch(t, "a:b:c:d", []string{"a:b:c", "a:*:c"}, NewTokenizedSource("a:b:c")) - testBestMatch(t, "a:b:c:d", []string{"a:*:c", "a:b:c"}, NewTokenizedSource("a:b:c")) - testBestMatch(t, "a:b:c:d", []string{"a:b", "a:*:c"}, NewTokenizedSource("a:*:c")) - testBestMatch(t, "a:b:c:d", []string{"a:*:c", "a:b"}, NewTokenizedSource("a:*:c")) - testBestMatch(t, "a:b:c:d", []string{"a", "a:b"}, NewTokenizedSource("a:b")) - testBestMatch(t, "a:b:c:d", []string{"a:b", "a"}, NewTokenizedSource("a:b")) -} diff --git a/pkg/test/local.mk b/pkg/test/local.mk index 05e4b91..fc48e43 100644 --- a/pkg/test/local.mk +++ b/pkg/test/local.mk @@ -5,9 +5,6 @@ setup_envtest_bin = $(go_bin)/setup-envtest clean_targets += .envtest-clean .acr-clean -ACR_DB_URL ?= "postgres://reporting:reporting@localhost/appuio-cloud-reporting-test?sslmode=disable" -acr_clone_target ?= $(work_dir)/appuio-cloud-reporting - CLOUDSCALE_CRDS_PATH ?= $(shell go list -f 
'{{.Dir}}' -m github.com/vshn/provider-cloudscale)/package/crds EXOSCALE_CRDS_PATH ?= $(shell go list -f '{{.Dir}}' -m github.com/vshn/provider-exoscale)/package/crds @@ -17,11 +14,10 @@ $(setup_envtest_bin): | $(go_bin) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest .PHONY: test-integration -test-integration: $(setup_envtest_bin) start-acr ## Run integration tests against code +test-integration: $(setup_envtest_bin) ## Run integration tests against code $(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use '$(ENVTEST_K8S_VERSION)!' @chmod -R +w $(go_bin)/k8s export KUBEBUILDER_ASSETS="$$($(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use -i -p path '$(ENVTEST_K8S_VERSION)!')" && \ - export ACR_DB_URL="$(ACR_DB_URL)" && \ export CLOUDSCALE_CRDS_PATH="$(CLOUDSCALE_CRDS_PATH)" && \ export EXOSCALE_CRDS_PATH="$(EXOSCALE_CRDS_PATH)" && \ go test -tags=integration -coverprofile cover.out -covermode atomic ./... @@ -29,26 +25,3 @@ test-integration: $(setup_envtest_bin) start-acr ## Run integration tests agains .PHONY: .envtest-clean .envtest-clean: rm -f $(setup_envtest_bin) - -## ACR setup -$(acr_clone_target): - git clone https://github.com/appuio/appuio-cloud-reporting $@ - -.PHONY: start-acr -start-acr: $(acr_clone_target) ## Starts ACR - pushd $(acr_clone_target) && \ - make docker-compose-up && \ - make ping-postgres && \ - PGPASSWORD=reporting createdb --username=reporting -h localhost -p 5432 appuio-cloud-reporting-test || echo "already exists, skipping createdb" && \ - export ACR_DB_URL="$(ACR_DB_URL)" && \ - go run . migrate && \ - go run . 
migrate --seed - -.PHONY: stop-acr -stop-acr: $(acr_clone_target) - pushd $(acr_clone_target) && \ - make docker-compose-down - -.PHONY: .acr-clean -.acr-clean: - rm -Rf $(acr_clone_target) diff --git a/pkg/test/suite.go b/pkg/test/suite.go index a830e47..7fae2fc 100644 --- a/pkg/test/suite.go +++ b/pkg/test/suite.go @@ -1,24 +1,18 @@ +//go:build integration + package test import ( "context" "fmt" "net/http" - "net/url" "os" - "strings" "testing" + "time" - "github.com/appuio/appuio-cloud-reporting/pkg/db" "github.com/go-logr/logr" - "github.com/go-logr/zapr" - "github.com/google/uuid" - "github.com/jackc/pgx/v4" - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/vshn/billing-collector-cloudservices/pkg/reporting" - "go.uber.org/zap/zaptest" + "github.com/vshn/billing-collector-cloudservices/pkg/log" "gopkg.in/dnaeon/go-vcr.v3/cassette" "gopkg.in/dnaeon/go-vcr.v3/recorder" corev1 "k8s.io/api/core/v1" @@ -28,7 +22,6 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/log" ) type Suite struct { @@ -42,38 +35,24 @@ type Suite struct { Context context.Context cancel context.CancelFunc Scheme *runtime.Scheme - - DatabaseURL string - tmpDBName string - maintenanceDB *sqlx.DB } // SetupSuite is used for setting up the testsuite before all tests are run. If you need to override it, make sure to call `SetupEnv()` first. 
func (ts *Suite) SetupSuite() { - ts.SetupEnv(nil) -} - -// SetupTest ensures a separate temporary DB for each test -func (ts *Suite) SetupTest() { - ts.setupDB() -} - -// TearDownTest cleans up temporary DB after each test -func (ts *Suite) TearDownTest() { - assert := ts.Assert() - assert.NoError(dropDB(ts.maintenanceDB, pgx.Identifier{ts.tmpDBName})) - assert.NoError(ts.maintenanceDB.Close()) + ts.SetupEnv(nil, "") } -func (ts *Suite) SetupEnv(crdPaths []string) { +func (ts *Suite) SetupEnv(crdPaths []string, bindString string) { ts.T().Helper() assert := ts.Assert() - ts.Logger = zapr.NewLogger(zaptest.NewLogger(ts.T())) - log.SetLogger(ts.Logger) + logger, err := log.NewLogger("integrationtest", time.Now().String(), 1, "console") + assert.NoError(err, "cannot initialize logger") - ts.Context, ts.cancel = context.WithCancel(context.Background()) + ts.Context = log.NewLoggingContext(context.Background(), logger) + ts.Logger = logger + ts.Context, ts.cancel = context.WithCancel(ts.Context) envtestAssets, ok := os.LookupEnv("KUBEBUILDER_ASSETS") if !ok { @@ -143,69 +122,6 @@ func (ts *Suite) EnsureNS(name string, labels map[string]string) { }) } -func (ts *Suite) setupDB() { - ts.T().Helper() - - assert := ts.Assert() - - databaseURL := os.Getenv("ACR_DB_URL") - assert.NotZero(databaseURL) - - u, err := url.Parse(databaseURL) - assert.NoError(err) - - dbName := strings.TrimPrefix(u.Path, "/") - tmpDbName := dbName + "-tmp-" + uuid.NewString() - ts.tmpDBName = tmpDbName - - // Connect to a neutral database - mdb, err := openMaintenance(databaseURL) - require.NoError(ts.T(), err) - ts.maintenanceDB = mdb - - require.NoError(ts.T(), - cloneDB(ts.maintenanceDB, pgx.Identifier{tmpDbName}, pgx.Identifier{dbName}), - ) - - // Connect to the temporary database - tmpURL := new(url.URL) - *tmpURL = *u - tmpURL.Path = "/" + tmpDbName - ts.T().Logf("Using database name: %s", tmpDbName) - ts.DatabaseURL = tmpURL.String() -} - -func cloneDB(maint *sqlx.DB, dst, src 
pgx.Identifier) error { - _, err := maint.Exec(fmt.Sprintf(`CREATE DATABASE %s TEMPLATE %s`, - dst.Sanitize(), - src.Sanitize())) - if err != nil { - return fmt.Errorf("error cloning database `%s` to `%s`: %w", src.Sanitize(), dst.Sanitize(), err) - } - return nil -} - -func dropDB(maint *sqlx.DB, name pgx.Identifier) error { - _, err := maint.Exec(fmt.Sprintf(`DROP DATABASE %s WITH (FORCE)`, name.Sanitize())) - if err != nil { - return fmt.Errorf("error dropping database `%s`: %w", name.Sanitize(), err) - } - return nil -} - -func openMaintenance(dbURL string) (*sqlx.DB, error) { - maintURL, err := url.Parse(dbURL) - if err != nil { - return nil, fmt.Errorf("error parsing url: %w", err) - } - maintURL.Path = "/postgres" - mdb, err := db.Openx(maintURL.String()) - if err != nil { - return nil, fmt.Errorf("error connecting to maintenance (`%s`) database: %w", maintURL.Path, err) - } - return mdb, nil -} - func RequestRecorder(t *testing.T, path string) (*http.Client, func(), error) { t.Helper() @@ -232,43 +148,3 @@ func RequestRecorder(t *testing.T, path string) (*http.Client, func(), error) { return r.GetDefaultClient(), cancel, nil } - -func FactByRecord(ctx context.Context, tx *sqlx.Tx, dt *db.DateTime, record reporting.Record) (*db.Fact, error) { - query, err := reporting.GetQueryByName(ctx, tx, record.QueryName) - if err != nil { - return nil, fmt.Errorf("query: %w", err) - } - - tenant, err := reporting.GetTenantBySource(ctx, tx, record.TenantSource) - if err != nil { - return nil, fmt.Errorf("tenant: %w", err) - } - - category, err := reporting.GetCategory(ctx, tx, record.CategorySource) - if err != nil { - return nil, fmt.Errorf("category: %w", err) - } - - product, err := reporting.GetBestMatchingProduct(ctx, tx, record.ProductSource, record.BillingDate) - if err != nil { - return nil, fmt.Errorf("product: %w", err) - } - - discount, err := reporting.GetBestMatchingDiscount(ctx, tx, record.DiscountSource, record.BillingDate) - if err != nil { - return nil, 
fmt.Errorf("discount: %w", err) - } - - fact, err := reporting.GetByFact(ctx, tx, &db.Fact{ - DateTimeId: dt.Id, - QueryId: query.Id, - TenantId: tenant.Id, - CategoryId: category.Id, - ProductId: product.Id, - DiscountId: discount.Id, - }) - if err != nil { - return nil, fmt.Errorf("fact: %w", err) - } - return fact, nil -}