From ac1111fd9bbbfe2c602f0df01ac28022b0f7fc9d Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Tue, 5 Mar 2024 16:45:28 +1100 Subject: [PATCH] feat: introduce retention policy support --- Makefile | 4 +- docker-compose.yaml | 2 + ...populate-api-data-ci-local-control-k8s.gql | 50 +++ node-packages/commons/src/tasks.ts | 20 + services/actions-handler/go.mod | 3 + services/actions-handler/go.sum | 13 + .../handler/action_retention.go | 157 +++++++ services/actions-handler/handler/handler.go | 18 +- services/actions-handler/main.go | 39 ++ .../20240408000000_retention_policy.js | 31 ++ services/api/src/models/retentionpolicy.ts | 102 +++++ services/api/src/resolvers.js | 52 ++- services/api/src/resources/backup/sql.ts | 2 +- .../api/src/resources/deployment/resolvers.ts | 37 +- services/api/src/resources/deployment/sql.ts | 54 ++- .../api/src/resources/environment/helpers.ts | 20 +- .../api/src/resources/organization/helpers.ts | 5 + .../api/src/resources/organization/sql.ts | 5 + services/api/src/resources/project/sql.ts | 6 + .../src/resources/retentionpolicy/PAYLOADS.md | 73 ++++ .../src/resources/retentionpolicy/README.md | 134 ++++++ .../src/resources/retentionpolicy/harbor.ts | 72 ++++ .../src/resources/retentionpolicy/helpers.ts | 387 +++++++++++++++++ .../src/resources/retentionpolicy/history.ts | 207 +++++++++ .../resources/retentionpolicy/resolvers.ts | 394 ++++++++++++++++++ .../api/src/resources/retentionpolicy/sql.ts | 123 ++++++ .../src/resources/retentionpolicy/types.ts | 140 +++++++ services/api/src/resources/task/helpers.ts | 4 + services/api/src/resources/task/resolvers.ts | 15 + services/api/src/resources/task/sql.ts | 52 +++ services/api/src/typeDefs.js | 196 +++++++++ services/logs2notifications/main.go | 10 +- 32 files changed, 2409 insertions(+), 18 deletions(-) create mode 100644 services/actions-handler/handler/action_retention.go create mode 100644 services/api/database/migrations/20240408000000_retention_policy.js create mode 100644 
services/api/src/models/retentionpolicy.ts create mode 100644 services/api/src/resources/retentionpolicy/PAYLOADS.md create mode 100644 services/api/src/resources/retentionpolicy/README.md create mode 100644 services/api/src/resources/retentionpolicy/harbor.ts create mode 100644 services/api/src/resources/retentionpolicy/helpers.ts create mode 100644 services/api/src/resources/retentionpolicy/history.ts create mode 100644 services/api/src/resources/retentionpolicy/resolvers.ts create mode 100644 services/api/src/resources/retentionpolicy/sql.ts create mode 100644 services/api/src/resources/retentionpolicy/types.ts diff --git a/Makefile b/Makefile index 2e268635a5..c286de0ea9 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ BUILD_DEPLOY_IMAGE_TAG ?= edge # OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG and OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY # set this to a particular build image if required, defaults to nothing to consume what the chart provides -OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG= +OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=retention-policy OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY= # To build k3d with Calico instead of Flannel, set this to true. 
Note that the Calico install in lagoon-charts is always @@ -371,7 +371,7 @@ STERN_VERSION = v2.6.1 CHART_TESTING_VERSION = v3.10.1 K3D_IMAGE = docker.io/rancher/k3s:v1.28.6-k3s2 TESTS = [nginx,api,features-kubernetes,bulk-deployment,features-kubernetes-2,features-variables,active-standby-kubernetes,tasks,drush,python,gitlab,github,bitbucket,services,workflows] -CHARTS_TREEISH = main +CHARTS_TREEISH = retention-policies TASK_IMAGES = task-activestandby # Symlink the installed kubectl client if the correct version is already diff --git a/docker-compose.yaml b/docker-compose.yaml index 03a6e862b6..64bcbaa9af 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -113,6 +113,8 @@ services: actions-handler: image: ${IMAGE_REPO:-lagoon}/actions-handler restart: on-failure + environment: + - S3_FILES_HOST=http://172.17.0.1:9000 depends_on: - broker ssh: diff --git a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql index a432c18039..146362696c 100644 --- a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql +++ b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql @@ -155,4 +155,54 @@ mutation PopulateApi { id } + RetPol1: createRetentionPolicy(input:{ + name: "harbor" + type: HARBOR + harbor: { + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 * * * *" + } + }) { + id + name + configuration { + ... 
on HarborRetentionPolicy { + enabled + rules { + name + pattern + latestPulled + } + schedule + } + } + type + created + updated + } + + RetPolLink1: addRetentionPolicyLink(input:{ + id: 1 + scope: GLOBAL + scopeName: "global", + }) { + id + name + type + source + created + updated + } } diff --git a/node-packages/commons/src/tasks.ts b/node-packages/commons/src/tasks.ts index 7e538a3fe2..d4e4b0aecc 100644 --- a/node-packages/commons/src/tasks.ts +++ b/node-packages/commons/src/tasks.ts @@ -1551,3 +1551,23 @@ export const consumeTaskMonitor = async function( } }); } + + // leverages the `misc` queue to handle retention policy only messages to controller + // this is essentially a clone of createMiscTask, but specifically for retention policies +export const createRetentionPolicyTask = async function(policyData: any) { + var policyPayload: any = { + key: `deploytarget:${policyData.key}`, + misc: {} + } + switch (`deploytarget:${policyData.key}`) { + case 'deploytarget:harborpolicy:update': + // remote-controller has a basic payload resource under `misc` called `miscResource` which can store bytes + // so this b64 encodes the payload event and inserts it into the miscResource so that the remote-controller will understand it + const payloadBytes = new Buffer(JSON.stringify(policyData.data.event).replace(/\\n/g, "\n")).toString('base64') + policyPayload.misc.miscResource = payloadBytes + break; + default: + break; + } + return sendToLagoonTasks(policyData.data.target+':misc', policyPayload); +} diff --git a/services/actions-handler/go.mod b/services/actions-handler/go.mod index eff685ffad..2c0ed44823 100644 --- a/services/actions-handler/go.mod +++ b/services/actions-handler/go.mod @@ -3,6 +3,7 @@ module github.com/uselagoon/lagoon/services/actions-handler go 1.21 require ( + github.com/aws/aws-sdk-go v1.15.11 github.com/cheshir/go-mq/v2 v2.0.1 github.com/uselagoon/machinery v0.0.17 gopkg.in/matryer/try.v1 v1.0.0-20150601225556-312d2599e12e @@ -11,11 +12,13 @@ require 
( require ( github.com/NeowayLabs/wabbit v0.0.0-20210927194032-73ad61d1620e // indirect github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 // indirect + github.com/go-ini/ini v1.25.4 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/guregu/null v4.0.0+incompatible // indirect github.com/hashicorp/go-version v1.6.0 // indirect + github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect github.com/machinebox/graphql v0.2.3-0.20181106130121-3a9253180225 // indirect github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 // indirect github.com/pborman/uuid v1.2.1 // indirect diff --git a/services/actions-handler/go.sum b/services/actions-handler/go.sum index 49235d16ed..c07d6906be 100644 --- a/services/actions-handler/go.sum +++ b/services/actions-handler/go.sum @@ -109,6 +109,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11 h1:m45+Ru/wA+73cOZXiEGLDH2d9uLN3iHqMc0/z4noDXE= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -300,6 +301,7 @@ github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= 
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -360,6 +362,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -489,6 +492,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -546,6 +550,7 @@ github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -559,6 +564,7 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -724,6 +730,7 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod 
h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= @@ -786,8 +793,10 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -822,6 +831,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1029,6 +1039,7 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1170,6 +1181,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1410,6 +1422,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/services/actions-handler/handler/action_retention.go b/services/actions-handler/handler/action_retention.go new file mode 100644 index 0000000000..87fa8c4c8c --- /dev/null +++ b/services/actions-handler/handler/action_retention.go @@ -0,0 +1,157 @@ +package handler + +import ( + "context" + "encoding/json" + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + mq "github.com/cheshir/go-mq/v2" + "github.com/uselagoon/machinery/utils/namespace" +) + +type S3RetentionCleanUp struct { + EnvironmentName string `json:"environmentName"` + ProjectName string `json:"projectName"` + Task struct { + ID string `json:"id"` + } `json:"task"` + EnvironmentID int `json:"environmentId"` + ProjectID int `json:"projectId"` + BuildName string `json:"buildName"` + RemoteID string `json:"remoteId"` +} + +func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.MessageQueue, action *Action, messageID string) error { + prefix := fmt.Sprintf("(messageid:%s) %s: ", messageID, action.EventType) + data, _ := 
json.Marshal(action.Data) + retention := S3RetentionCleanUp{} + json.Unmarshal(data, &retention) + switch action.EventType { + case "taskCleanup": + filePath := fmt.Sprintf("tasklogs/%s/%s-%s.txt", + retention.ProjectName, + retention.Task.ID, + retention.RemoteID, + ) + if retention.EnvironmentName != "" { + filePath = fmt.Sprintf("tasklogs/%s/%s/%s-%s.txt", + retention.ProjectName, + namespace.ShortenEnvironment(retention.ProjectName, namespace.MakeSafe(retention.EnvironmentName)), + retention.Task.ID, + retention.RemoteID, + ) + } + // clean up any files/attachments the task may have uploaded into it + err := m.deleteFileInDirS3( + prefix, + fmt.Sprintf("tasks/%s", + retention.Task.ID, + ), + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + // handle cleaning up task logs + err = m.deleteFileS3( + prefix, + filePath, + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + case "buildCleanup": + // handle cleaning up build logs + err := m.deleteFileS3( + prefix, + fmt.Sprintf("buildlogs/%s/%s/%s-%s.txt", + retention.ProjectName, + retention.EnvironmentName, + retention.BuildName, + retention.RemoteID, + ), + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + } + return nil +} + +// deleteFileS3 +func (m *Messenger) deleteFileS3(prefix, fileName string, retention S3RetentionCleanUp) error { + var forcePath bool + forcePath = true + session, err := session.NewSession(&aws.Config{ + Region: aws.String(m.S3Configuration.S3FilesRegion), + Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), + Credentials: credentials.NewStaticCredentials(m.S3Configuration.S3FilesAccessKeyID, m.S3Configuration.S3FilesSecretAccessKey, ""), + S3ForcePathStyle: &forcePath, + }) + if err != nil { + return err + } + + object := s3.DeleteObjectInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Key: 
aws.String(fileName), + } + _, err = s3.New(session).DeleteObject(&object) + if err != nil { + return err + } + if m.EnableDebug { + log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, fileName, retention.EnvironmentName, retention.EnvironmentID)) + } + return nil +} + +// deleteDirFileS3 +func (m *Messenger) deleteFileInDirS3(prefix, fileName string, retention S3RetentionCleanUp) error { + var forcePath bool + forcePath = true + session, err := session.NewSession(&aws.Config{ + Region: aws.String(m.S3Configuration.S3FilesRegion), + Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), + Credentials: credentials.NewStaticCredentials(m.S3Configuration.S3FilesAccessKeyID, m.S3Configuration.S3FilesSecretAccessKey, ""), + S3ForcePathStyle: &forcePath, + }) + if err != nil { + return err + } + listobject := s3.ListObjectsInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Prefix: aws.String(fileName), + } + s := s3.New(session) + err = s.ListObjectsPages(&listobject, func(page *s3.ListObjectsOutput, lastPage bool) bool { + for _, c := range page.Contents { + _, err := s.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Key: c.Key, + }) + if err != nil { + log.Println(fmt.Sprintf("%sError deleting file %s for environment: %v, id: %v: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID, err)) + continue // try other files + } + if m.EnableDebug { + log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID)) + } + } + return *page.IsTruncated + }) + if err != nil { + return err + } + return nil +} diff --git a/services/actions-handler/handler/handler.go b/services/actions-handler/handler/handler.go index 44bfca187f..cd66408e85 100644 --- a/services/actions-handler/handler/handler.go +++ b/services/actions-handler/handler/handler.go @@ -23,6 +23,16 @@ type LagoonAPI struct { JWTIssuer 
string `json:"issuer"` } +// S3Configuration . +type S3Configuration struct { + S3FilesAccessKeyID string + S3FilesSecretAccessKey string + S3FilesBucket string + S3FilesRegion string + S3FilesOrigin string + S3IsGCS bool +} + // Action is the structure of an action that is received via the message queue. type Action struct { Type string `json:"type"` // defines the action type @@ -40,6 +50,7 @@ type messenger interface { type Messenger struct { Config mq.Config LagoonAPI LagoonAPI + S3Configuration S3Configuration ConnectionAttempts int ConnectionRetryInterval int ActionsQueueName string @@ -48,10 +59,11 @@ type Messenger struct { } // New returns a messaging with config -func New(config mq.Config, lagoonAPI LagoonAPI, startupAttempts int, startupInterval int, actionsQueueName, controllerQueueName string, enableDebug bool) *Messenger { +func New(config mq.Config, lagoonAPI LagoonAPI, s3Configuration S3Configuration, startupAttempts int, startupInterval int, actionsQueueName, controllerQueueName string, enableDebug bool) *Messenger { return &Messenger{ Config: config, LagoonAPI: lagoonAPI, + S3Configuration: s3Configuration, ConnectionAttempts: startupAttempts, ConnectionRetryInterval: startupInterval, ActionsQueueName: actionsQueueName, @@ -112,6 +124,10 @@ func (m *Messenger) Consumer() { // and perform the steps to run the mutation against the lagoon api case "deployEnvironmentLatest": err = m.handleDeployEnvironment(ctx, messageQueue, action, messageID) + // check if this a `retentionCleanup` type of action + // and perform the steps to clean up anything related to the retention clean up event type + case "retentionCleanup": + err = m.handleRetention(ctx, messageQueue, action, messageID) } // if there aren't any errors, then ack the message, an error indicates that there may have been an issue with the api handling the request // skipping this means the message will remain in the queue diff --git a/services/actions-handler/main.go 
b/services/actions-handler/main.go index 1eab5d3114..788a191e1c 100644 --- a/services/actions-handler/main.go +++ b/services/actions-handler/main.go @@ -32,6 +32,13 @@ var ( controllerExchange string jwtSubject string jwtIssuer string + + s3FilesAccessKeyID string + s3FilesSecretAccessKey string + s3FilesBucket string + s3FilesRegion string + s3FilesOrigin string + s3isGCS bool ) func main() { @@ -69,6 +76,21 @@ func main() { "The name of the queue in rabbitmq to use.") flag.StringVar(&controllerExchange, "controller-exchange", "lagoon-tasks", "The name of the exchange in rabbitmq to use.") + + // S3 configuration + flag.StringVar(&s3FilesAccessKeyID, "s3-files-access-key", "minio", + "The S3 files access key.") + flag.StringVar(&s3FilesSecretAccessKey, "s3-files-secret-access-key", "minio123", + "The S3 files secret access key.") + flag.StringVar(&s3FilesBucket, "s3-files-bucket", "lagoon-files", + "The S3 files bucket.") + flag.StringVar(&s3FilesRegion, "s3-files-region", "auto", + "The S3 files region.") + flag.StringVar(&s3FilesOrigin, "s3-files-origin", "http://minio.127.0.0.1.nip.io:9000", + "The S3 files origin.") + flag.BoolVar(&s3isGCS, "s3-google-cloud", false, + "If the storage backend is google cloud.") + flag.Parse() // get overrides from environment variables @@ -87,6 +109,13 @@ func main() { controllerQueueName = variables.GetEnv("CONTROLLER_QUEUE_NAME", controllerQueueName) controllerExchange = variables.GetEnv("CONTROLLER_EXCHANGE", controllerExchange) + s3FilesAccessKeyID = variables.GetEnv("S3_FILES_ACCESS_KEY_ID", s3FilesAccessKeyID) + s3FilesSecretAccessKey = variables.GetEnv("S3_FILES_SECRET_ACCESS_KEY", s3FilesSecretAccessKey) + s3FilesBucket = variables.GetEnv("S3_FILES_BUCKET", s3FilesBucket) + s3FilesRegion = variables.GetEnv("S3_FILES_REGION", s3FilesRegion) + s3FilesOrigin = variables.GetEnv("S3_FILES_HOST", s3FilesOrigin) + s3isGCS = variables.GetEnvBool("S3_FILES_GCS", s3isGCS) + enableDebug := true graphQLConfig := handler.LagoonAPI{ 
@@ -97,6 +126,15 @@ func main() { JWTIssuer: jwtIssuer, } + s3Config := handler.S3Configuration{ + S3FilesAccessKeyID: s3FilesAccessKeyID, + S3FilesSecretAccessKey: s3FilesSecretAccessKey, + S3FilesBucket: s3FilesBucket, + S3FilesRegion: s3FilesRegion, + S3FilesOrigin: s3FilesOrigin, + S3IsGCS: s3isGCS, + } + log.Println("actions-handler running") config := mq.Config{ @@ -205,6 +243,7 @@ func main() { messenger := handler.New(config, graphQLConfig, + s3Config, startupConnectionAttempts, startupConnectionInterval, actionsQueueName, diff --git a/services/api/database/migrations/20240408000000_retention_policy.js b/services/api/database/migrations/20240408000000_retention_policy.js new file mode 100644 index 0000000000..0ff5ba450c --- /dev/null +++ b/services/api/database/migrations/20240408000000_retention_policy.js @@ -0,0 +1,31 @@ +/** + * @param { import("knex").Knex } knex + * @returns { Promise } + */ +exports.up = async function(knex) { + return knex.schema + .createTable('retention_policy', function (table) { + table.increments('id').notNullable().primary(); + table.string('name', 300).unique({indexName: 'name'}); + table.enu('type',['harbor','history']).notNullable(); + table.text('configuration'); + table.timestamp('updated').notNullable().defaultTo(knex.fn.now()); + table.timestamp('created').notNullable().defaultTo(knex.fn.now()); + }) + .createTable('retention_policy_reference', function (table) { + table.integer('retention_policy'); + table.enu('scope',['global','organization','project']).notNullable(); + table.integer('id'); + table.unique(['retention_policy', 'scope', 'id'], {indexName: 'organization_policy'}); + }) +}; + +/** + * @param { import("knex").Knex } knex + * @returns { Promise } + */ +exports.down = async function(knex) { + return knex.schema + .dropTable('retention_policy') + .dropTable('retention_policy_reference') +}; \ No newline at end of file diff --git a/services/api/src/models/retentionpolicy.ts 
b/services/api/src/models/retentionpolicy.ts new file mode 100644 index 0000000000..3e6c74eecf --- /dev/null +++ b/services/api/src/models/retentionpolicy.ts @@ -0,0 +1,102 @@ +import { logger } from "../loggers/logger" + +export interface HarborRetentionPolicy { + enabled: boolean + branchRetention: number + pullrequestRetention: number + schedule: string +} + +export interface HistoryRetentionPolicy { + enabled: boolean + deploymentHistory: number + taskHistory: number +} + +export const RetentionPolicy = () => { + const convertHarborRetentionPolicyToJSON = async ( + harbor: HarborRetentionPolicy + ): Promise => { + const c = JSON.stringify(harbor) + return c + }; + + const convertHistoryRetentionPolicyToJSON = async ( + history: HistoryRetentionPolicy + ): Promise => { + const c = JSON.stringify(history) + return c + }; + + const convertJSONToHarborRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.branchRetention != "number") { + throw new Error("branchRetention must be a number"); + } + if (typeof c.pullrequestRetention != "number") { + throw new Error("pullrequestRetention must be a number"); + } + if (typeof c.schedule != "string") { + throw new Error("schedule must be a string"); + } + return c + }; + + const convertJSONToHistoryRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.deploymentHistory != "number") { + throw new Error("deploymentHistory must be a number"); + } + if (typeof c.taskHistory != "number") { + throw new Error("taskHistory must be a number"); + } + return c + }; + + // run the configuration patches through the validation process + const returnValidatedConfiguration = async (type: string, patch: any): Promise => { + const 
c = JSON.stringify(patch[type]) + switch (type) { + case "harbor": + try { + await convertJSONToHarborRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + case "history": + try { + await convertJSONToHistoryRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + default: + throw new Error( + `Provided configuration is not valid for type ${type}` + ); + } + } + + return { + convertHarborRetentionPolicyToJSON, + convertHistoryRetentionPolicyToJSON, + convertJSONToHarborRetentionPolicy, + convertJSONToHistoryRetentionPolicy, + returnValidatedConfiguration + }; +}; \ No newline at end of file diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 8c192b3174..830527d6a2 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -289,6 +289,17 @@ const { getRestoreLocation, } = require('./resources/backup/resolvers'); +const { + createRetentionPolicy, + updateRetentionPolicy, + deleteRetentionPolicy, + getRetentionPoliciesByProjectId, + getRetentionPoliciesByOrganizationId, + listRetentionPolicies, + addRetentionPolicyLink, + removeRetentionPolicyLink, +} = require('./resources/retentionpolicy/resolvers'); + const { getEnvVarsByProjectId, getEnvVarsByEnvironmentId, @@ -390,6 +401,20 @@ const resolvers = { ACTIVE: 'active', SUCCEEDED: 'succeeded', }, + RetentionPolicyType: { + HARBOR: 'harbor', + HISTORY: 'history', + }, + RetentionPolicyScope: { + GLOBAL: 'global', + ORGANIZATION: 'organization', + PROJECT: 'project', + }, + HistoryRetentionType: { + COUNT: 'count', + DAYS: 'days', + MONTHS: 'months', + }, Openshift: { projectUser: getProjectUser, token: getToken, @@ -412,6 +437,7 @@ const resolvers = { groups: getGroupsByProjectId, privateKey: getPrivateKey, publicKey: getProjectDeployKey, + retentionPolicies: getRetentionPoliciesByProjectId, }, 
GroupInterface: { __resolveType(group) { @@ -462,12 +488,14 @@ const resolvers = { environments: getEnvironmentsByOrganizationId, owners: getOwnersByOrganizationId, deployTargets: getDeployTargetsByOrganizationId, - notifications: getNotificationsByOrganizationId + notifications: getNotificationsByOrganizationId, + retentionPolicies: getRetentionPoliciesByOrganizationId }, OrgProject: { groups: getGroupsByOrganizationsProject, groupCount: getGroupCountByOrganizationProject, notifications: getNotificationsForOrganizationProjectId, + retentionPolicies: getRetentionPoliciesByProjectId, }, OrgEnvironment: { project: getProjectById, @@ -515,6 +543,18 @@ const resolvers = { } } }, + RetentionPolicyConfiguration: { + __resolveType(obj) { + switch (obj.type) { + case 'harbor': + return 'HarborRetentionPolicy'; + case 'history': + return 'HistoryRetentionPolicy'; + default: + return null; + } + } + }, AdvancedTaskDefinition: { __resolveType (obj) { switch(obj.type) { @@ -595,7 +635,8 @@ const resolvers = { getGroupProjectOrganizationAssociation, getProjectGroupOrganizationAssociation, getEnvVariablesByProjectEnvironmentName, - checkBulkImportProjectsAndGroupsToOrganization + checkBulkImportProjectsAndGroupsToOrganization, + listRetentionPolicies }, Mutation: { addProblem, @@ -730,7 +771,12 @@ const resolvers = { removeUserFromOrganizationGroups, bulkImportProjectsAndGroupsToOrganization, addOrUpdateEnvironmentService, - deleteEnvironmentService + deleteEnvironmentService, + createRetentionPolicy, + updateRetentionPolicy, + deleteRetentionPolicy, + addRetentionPolicyLink, + removeRetentionPolicyLink }, Subscription: { backupChanged: backupSubscriber, diff --git a/services/api/src/resources/backup/sql.ts b/services/api/src/resources/backup/sql.ts index 1af63dc86f..56a29578c6 100644 --- a/services/api/src/resources/backup/sql.ts +++ b/services/api/src/resources/backup/sql.ts @@ -56,7 +56,7 @@ export const Sql = { deleteBackup: (backupId: string) => knex('environment_backup') 
.where('backup_id', backupId) - .update({ deleted: knex.fn.now() }) + .del() // actually delete the backup, there is no real reason to retain this information, the snapshot is gone .toString(), truncateBackup: () => knex('environment_backup') diff --git a/services/api/src/resources/deployment/resolvers.ts b/services/api/src/resources/deployment/resolvers.ts index 8aa239b80c..e1618c6b17 100644 --- a/services/api/src/resources/deployment/resolvers.ts +++ b/services/api/src/resources/deployment/resolvers.ts @@ -18,6 +18,8 @@ import { knex, query, isPatchEmpty } from '../../util/db'; import { Sql } from './sql'; import { Helpers } from './helpers'; import { Helpers as environmentHelpers } from '../environment/helpers'; +import { Helpers as retentionHelpers } from '../retentionpolicy/helpers'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; import { Helpers as projectHelpers } from '../project/helpers'; import { addTask } from '@lagoon/commons/dist/api'; import { Sql as environmentSql } from '../environment/sql'; @@ -342,6 +344,10 @@ export const addDeployment: ResolverFn = async ( if (!sourceType) { sourceType = "API" } + const projectData = await projectHelpers(sqlClientPool).getProjectById( + environment.project + ); + const { insertId } = await query( sqlClientPool, Sql.insertDeployment({ @@ -361,10 +367,12 @@ export const addDeployment: ResolverFn = async ( sourceUser, }) ); - const rows = await query(sqlClientPool, Sql.selectDeployment(insertId)); const deployment = R.prop(0, rows); + // pass to the HistoryRetentionEnforcer to clean up deployments based on any retention policies + await HistoryRetentionEnforcer().cleanupDeployments(projectData, environment) + pubSub.publish(EVENTS.DEPLOYMENT, deployment); return deployment; }; @@ -380,13 +388,28 @@ export const deleteDeployment: ResolverFn = async ( project: R.path(['0', 'pid'], perms) }); + const deployment = await Helpers(sqlClientPool).getDeploymentById(id) + + if (!deployment) { + throw 
new Error( + `Invalid deployment input` + ); + } + + const environmentData = await environmentHelpers(sqlClientPool).getEnvironmentById(parseInt(deployment.environment)); + const projectData = await projectHelpers(sqlClientPool).getProjectById(environmentData.project); + await query(sqlClientPool, Sql.deleteDeployment(id)); + // pass the deployment to the HistoryRetentionEnforcer + await HistoryRetentionEnforcer().cleanupDeployment(projectData, environmentData, deployment) + userActivityLogger(`User deleted deployment '${id}'`, { project: '', event: 'api:deleteDeployment', payload: { - deployment: id + deployment: id, + deploymentName: deployment.name } }); @@ -425,9 +448,10 @@ export const updateDeployment: ResolverFn = async ( Sql.selectPermsForDeployment(id) ); + const projectId = R.path(['0', 'pid'], permsDeployment) // Check access to modify deployment as it currently stands await hasPermission('deployment', 'update', { - project: R.path(['0', 'pid'], permsDeployment) + project: projectId }); if (environment) { @@ -465,6 +489,13 @@ export const updateDeployment: ResolverFn = async ( pubSub.publish(EVENTS.DEPLOYMENT, deployment); + try { + // handle retention policy hooks + await retentionHelpers(sqlClientPool).postDeploymentProjectPolicyHook(parseInt(projectId.toString(), 10), status) + } catch (e) { + logger.warn(`failed to perform postDeploymentProjectPolicyHook: ${e}`) + } + userActivityLogger(`User updated deployment '${id}'`, { project: '', event: 'api:updateDeployment', diff --git a/services/api/src/resources/deployment/sql.ts b/services/api/src/resources/deployment/sql.ts index e84e942d36..d22b8602be 100644 --- a/services/api/src/resources/deployment/sql.ts +++ b/services/api/src/resources/deployment/sql.ts @@ -74,5 +74,57 @@ export const Sql = { .select({ pid: 'environment.project' }) .join('environment', 'deployment.environment', '=', 'environment.id') .where('deployment.id', id) - .toString() + .toString(), + // this selects all deployments for the 
environment and returns everything outside of the requested retain value
+  selectDeploymentHistoryRetention: (environment: number, retain: number) =>
+    knex.raw(`SELECT id, name, remote_id FROM deployment
+      WHERE environment=`+environment+` AND id NOT IN (
+        SELECT id
+        FROM (
+          SELECT id
+          FROM deployment
+          WHERE environment=`+environment+`
+          ORDER BY id DESC
+          LIMIT `+retain+`
+        ) d
+      );`)
+    .toString(),
+  // this selects all deployments for the environment and returns everything outside of the requested retain days value
+  // note: `<=` selects records OLDER than the cutoff (the expired history); `>=` would return the records to keep
+  selectDeploymentHistoryRetentionDays: (environment: number, retain: number) =>
+    knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created <= NOW() - INTERVAL `+retain+` DAY;`)
+    .toString(),
+  // this selects all deployments for the environment and returns everything outside of the requested retain months value
+  selectDeploymentHistoryRetentionMonths: (environment: number, retain: number) =>
+    knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created <= NOW() - INTERVAL `+retain+` MONTH;`)
+    .toString(),
+  // this selects all deployments for the environment and returns everything
+  selectDeploymentHistoryForEnvironment: (environment: number) =>
+    knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+`;`)
+    .toString(),
+  // same as select, except it deletes all deployments for the environment outside of the requested retain value
+  deleteDeploymentHistory: (environment: number, retain: number) =>
+    knex.raw(`DELETE FROM deployment
+      WHERE environment=`+environment+` AND id NOT IN (
+        SELECT id
+        FROM (
+          SELECT id
+          FROM deployment
+          WHERE environment=`+environment+`
+          ORDER BY id DESC
+          LIMIT `+retain+`
+        ) d
+      );`)
+    .toString(),
+  // same as select, except it deletes all deployments for the environment outside of the requested retain value
+  deleteDeploymentHistoryDays: (environment: number, retain: number) =>
+    knex.raw(`DELETE FROM deployment WHERE 
environment=`+environment+` AND created <= NOW() - INTERVAL `+retain+` DAY;`)
+    .toString(),
+  // same as select, except it deletes all deployments for the environment outside of the requested retain value
+  // note: `<=` deletes records OLDER than the cutoff; `>=` would delete the recent history that should be retained
+  deleteDeploymentHistoryMonths: (environment: number, retain: number) =>
+    knex.raw(`DELETE FROM deployment WHERE environment=`+environment+` AND created <= NOW() - INTERVAL `+retain+` MONTH;`)
+    .toString(),
+  // delete all deployments for environment
+  deleteDeploymentHistoryForEnvironment: (environment: number) =>
+    knex.raw(`DELETE FROM deployment WHERE environment=`+environment+`;`)
+    .toString(),
 };
diff --git a/services/api/src/resources/environment/helpers.ts b/services/api/src/resources/environment/helpers.ts
index 201b285f37..aa731bb88d 100644
--- a/services/api/src/resources/environment/helpers.ts
+++ b/services/api/src/resources/environment/helpers.ts
@@ -6,7 +6,10 @@ import { Sql } from './sql';
 import { Sql as problemSql } from '../problem/sql';
 import { Sql as factSql } from '../fact/sql';
 import { Helpers as projectHelpers } from '../project/helpers';
-// import { logger } from '../../loggers/logger';
+import { Sql as deploymentSql } from '../deployment/sql';
+import { Sql as taskSql } from '../task/sql';
+import { HistoryRetentionEnforcer } from '../retentionpolicy/history';
+import { logger } from '../../loggers/logger';
 
 export const Helpers = (sqlClientPool: Pool) => {
   const aliasOpenshiftToK8s = (environments: any[]) => {
@@ -31,6 +34,8 @@ export const Helpers = (sqlClientPool: Pool) => {
     aliasOpenshiftToK8s,
     getEnvironmentById,
     deleteEnvironment: async (name: string, eid: number, pid: number) => {
+      const environmentData = await Helpers(sqlClientPool).getEnvironmentById(eid);
+      const projectData = await projectHelpers(sqlClientPool).getProjectById(pid);
       // clean up environment variables
       // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment variables`)
       await query(
@@ -43,7 +48,18 @@ export const Helpers = 
(sqlClientPool: Pool) => { sqlClientPool, Sql.deleteServices(eid) ); - // @TODO: environment_storage, deployment, environment_backup, task, environment_problem, environment_fact + // @TODO: environment_storage, environment_backup, environment_problem, environment_fact + // purge all history for this environment, including logs and files from s3 + try { + await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) // remove all deployments and associated files + } catch (e) { + logger.error(`error running deployment retention enforcer: ${e}`) + } + try { + await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) // remove all tasks and associated files + } catch (e) { + logger.error(`error running task retention enforcer: ${e}`) + } // delete the environment // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid}`) await query( diff --git a/services/api/src/resources/organization/helpers.ts b/services/api/src/resources/organization/helpers.ts index 33200337a1..8eff7b1178 100644 --- a/services/api/src/resources/organization/helpers.ts +++ b/services/api/src/resources/organization/helpers.ts @@ -7,6 +7,10 @@ export const Helpers = (sqlClientPool: Pool) => { const getOrganizationById = async (id: number) => { const rows = await query(sqlClientPool, Sql.selectOrganization(id)); return R.prop(0, rows); + } + const getOrganizationByName = async (name: string) => { + const rows = await query(sqlClientPool, Sql.selectOrganizationByName(name)); + return R.prop(0, rows); }; const getProjectsByOrganizationId = async (id: number) => { const rows = await query(sqlClientPool, Sql.selectOrganizationProjects(id)); @@ -60,6 +64,7 @@ export const Helpers = (sqlClientPool: Pool) => { }; return { getOrganizationById, + getOrganizationByName, getProjectsByOrganizationId, getDeployTargetsByOrganizationId, getNotificationsForOrganizationId, diff --git a/services/api/src/resources/organization/sql.ts 
b/services/api/src/resources/organization/sql.ts index d05fca12dd..c58f71dfaa 100644 --- a/services/api/src/resources/organization/sql.ts +++ b/services/api/src/resources/organization/sql.ts @@ -82,6 +82,11 @@ export const Sql = { knex('project') .where('organization', '=', id) .toString(), + selectOrganizationProjectIds: (id: number) => + knex('project') + .select(knex.raw('group_concat(id) as project_ids')) + .where('organization', '=', id) + .toString(), selectOrganizationEnvironments: (id: number) => knex('organization') .select('e.*') diff --git a/services/api/src/resources/project/sql.ts b/services/api/src/resources/project/sql.ts index 5f247989dd..724e551d6d 100644 --- a/services/api/src/resources/project/sql.ts +++ b/services/api/src/resources/project/sql.ts @@ -16,6 +16,12 @@ export const Sql = { .whereNotIn('id', ids) .orderBy('id', 'asc') .toString(), + selectAllProjectIDsNotIn: (ids) => + knex('project') + .select(knex.raw('group_concat(id) as project_ids')) + .whereNotIn('id', ids) + .orderBy('id', 'asc') + .toString(), selectAllProjectsIn: (ids: number) => knex('project') .select('id') diff --git a/services/api/src/resources/retentionpolicy/PAYLOADS.md b/services/api/src/resources/retentionpolicy/PAYLOADS.md new file mode 100644 index 0000000000..8345b6e988 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/PAYLOADS.md @@ -0,0 +1,73 @@ + + +# harbor remote event payloads + +When a harbor policy is enforced, a message will be sent to the remote clusters, the payload sent will be one of the following depending on if the policy is being added, updated, or removed. + +## add or update policy + +Policy addition or updating will contain the policy information and the project information. The remote-controller uses this and the `eventType` to add or update the policy in the harbor that is associated to the project in each remote. 
+ +``` +{ + "key":"deploytarget:harborpolicy:update", + "misc":{ + "miscResource":"eyJ0eXBlIjoiaGFyYm9yUmV0ZW50aW9uUG9saWN5IiwiZXZlbnRUeXBlIjoidXBkYXRlUG9saWN5IiwiZGF0YSI6eyJwcm9qZWN0Ijp7ImlkIjoxODAsIm5hbWUiOiJsYWdvb24tZGVtby1vcmcifSwicG9saWN5Ijp7ImVuYWJsZWQiOnRydWUsInJ1bGVzIjpbeyJuYW1lIjoiYWxsIGJyYW5jaGVzLCBleGNsdWRpbmcgcHVsbHJlcXVlc3RzIiwicGF0dGVybiI6IltecHJcXC1dKi8qIiwibGF0ZXN0UHVsbGVkIjozfSx7Im5hbWUiOiJwdWxscmVxdWVzdHMiLCJwYXR0ZXJuIjoicHItKiIsImxhdGVzdFB1bGxlZCI6MX1dLCJzY2hlZHVsZSI6IjMgMyAqICogMyJ9fX0" + } +} +``` +And the decoded `miscResource` payload is structured like this, based on the type `HarborRetentionMessage`: +``` +{ + "type": "harborRetentionPolicy", + "eventType": "updatePolicy", + "data": { + "project": { + "id": 180, + "name": "lagoon-demo-org" + }, + "policy": { + "enabled": true, + "rules": [ + { + "name": "all branches, excluding pullrequests", + "pattern": "[^pr\\-]*/*", + "latestPulled": 3 + }, + { + "name": "pullrequests", + "pattern": "pr-*", + "latestPulled": 1 + } + ], + "schedule": "3 3 * * 3" + } + } +} +``` + +## remove policy + +Policy removal contains just the project information, the remote-controller uses this and the `eventType` to remove the policy from the associated project in harbor. 
+ +``` +{ + "key":"deploytarget:harborpolicy:update", + "misc":{ + "miscResource":"eyJ0eXBlIjoiaGFyYm9yUmV0ZW50aW9uUG9saWN5IiwiZXZlbnRUeXBlIjoicmVtb3ZlUG9saWN5IiwiZGF0YSI6eyJwcm9qZWN0Ijp7ImlkIjoxODAsIm5hbWUiOiJsYWdvb24tZGVtby1vcmcifX19" + } +} +``` +And the decoded `miscResource` payload is structured like this, based on the type `HarborRetentionMessage`: +``` +{ + "type": "harborRetentionPolicy", + "eventType": "removePolicy", + "data": { + "project": { + "id": 180, + "name": "lagoon-demo-org" + } + } +} +``` \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/README.md b/services/api/src/resources/retentionpolicy/README.md new file mode 100644 index 0000000000..355318c0cd --- /dev/null +++ b/services/api/src/resources/retentionpolicy/README.md @@ -0,0 +1,134 @@ +# retentionpolicy + +# policy types + +Policy types are used to influence different parts of retention within Lagoon, this means it is possible to add retention policies that target specific areas for retention. + +Policies can be applied to the following scopes: `global`, `organization`, and `project`. + +If applied to the `global` scope, ALL projects will receive the policy. If a policy is then added to an `organization` scope, then this policy will override anything at the `global` scope, and then the same for `project` scope would override the other two. + +Policies are created as a top level resource, meaning they can exist but not be assigned or used. An administrator can then link a policy to a scope. The act of linking a policy to a scope is what will turn the policy on (if the state is enabled) + +A policy of one type can only be applied to a scope at a single time. For example, if a `harbor` policy is applied to the global scope, then you cannot add another `harbor` policy to the global scope. + +# policy enforcement + +Different policy types will have different methods of enforcement. See enforcement for each policy type below. 
+
+## harbor
+
+This policy type is for managing how many images pushed to harbor are retained. This is a simplified version of what harbor offers that will work for images that Lagoon pushes into projects.
+
+The configuration options for harbor retention policies are
+* enabled - the state of the policy
+* rules - a list of rules to apply (OR based — an image is retained if it matches any rule)
+  * name - the name of the rule
+  * pattern - the pattern to use, this is based on doublestar path pattern matching and globbing (harbor uses this https://github.com/bmatcuk/doublestar#patterns)
+  * latestPulled - the number of images to retain for this rule
+* schedule - how often to run this retention policy in harbor (this schedule is executed by harbor, not lagoon)
+
+> Note: changing a policy from `enabled: true` to `enabled: false` will remove the policy from anything it may be associated to. This is a way to set a global (or organization) policy and allow an organization (or project) policy to disable it.
+
+### enforcement
+
+harbor policies when they are linked, unlinked, or updated, are sent to deploytargets to pass on to the harbor defined in that deploytarget.
+
+For example, if a harbor policy is linked to a scope, a hook is executed which will work out, based on the scope, which deploytargets need to be informed of the new policy.
+
+If there exists a global scoped harbor policy, and a new organization based policy is created and linked to an organization, the policy enforcer will work out which deploytargets any projects within that organization need to be informed of this new policy and send messages to them so they update the policy in their respective harbors.
+If the organization based policy is removed from the organization, then the enforcer will send a message to all of the projects in that organization again to inform them to revert back to the global policy. The same actions are performed if the policy would be applied to a project scope. 
+ +### creating a harbor policy + +``` +mutation createHarborPolicy { + createRetentionPolicy(input:{ + name: "custom-harbor-policy" + type: HARBOR + harbor: { + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr\\-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 3 * * 3" + } + }) { + id + name + configuration { + ... on HarborRetentionPolicy { + enabled + rules { + name + pattern + latestPulled + } + schedule + } + } + type + created + updated + } +} +``` + +For information or examples of the payloads that the harbor policy enforcement sends, see `PAYLOADS.md` + +## history + +This policy type will trim down the number of items that are retained in an environments history. + +The configuration options for history are +* enabled - the state of the policy +* deploymentType - can be one of `COUNT`, `DAYS`, `MONTHS` +* deploymentHistory - depending on the type selected, will retain deployments (logs, status, etc...) to this number accordingly +* taskType - can be one of `COUNT`, `DAYS`, `MONTHS` +* taskHistory - depending on the type selected, will retain task history (logs, status, etc...) to this number accordingly + +### enforcement + +history policies are enforced on demand. For example, when a new task or deployment is triggered, a hook is called that will check if the environment needs to enforce the policy or not based on the policy configuration. + +### creating a history policy + +``` +mutation createHistoryPolicy { + createRetentionPolicy(input:{ + name: "custom-history-policy" + type: HISTORY + history: { + enabled: true + deploymentHistory: 15 + deploymentType: DAYS + taskHistory: 3 + taskType: MONTHS + } + }) { + id + name + configuration { + ... 
on HistoryRetentionPolicy { + enabled + deploymentHistory + deploymentType + taskHistory + taskType + } + } + type + created + updated + } +} +``` \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/harbor.ts b/services/api/src/resources/retentionpolicy/harbor.ts new file mode 100644 index 0000000000..41680bdce2 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/harbor.ts @@ -0,0 +1,72 @@ +import { logger } from "../../loggers/logger"; +import { query } from '../../util/db'; +import { Helpers } from './helpers'; +import { HarborRetentionMessage, HarborRetentionEventType, HarborRetentionMessageType } from './types'; +import { Sql as environmentSql } from '../environment/sql'; +import { Sql as openshiftSql } from '../openshift/sql'; +import { Sql as projectSql } from '../project/sql'; +import { createRetentionPolicyTask } from '@lagoon/commons/dist/tasks'; + +export const HarborRetentionEnforcer = () => { + const updateProjects = async (sqlClientPool, policyChanges: any) => { + // loop over all the policyChanges and get all the environments for the project, and the deploytargets environments are in + // send each deploytarget ID the policy change for the project so that the harbor in that deploytarget will + // get the updated retention policy changes immediately + for (const pol of policyChanges) { + const rows = await query(sqlClientPool, environmentSql.selectEnvironmentsByProjectId(null, pol.pid, false, false, [])); + if (rows.length > 0) { + let targets = [] + for (const row of rows) { + const deployTarget = await query(sqlClientPool, openshiftSql.selectOpenshift(row.openshift)); + if (targets.indexOf(deployTarget[0].name) === -1) { + targets.push(deployTarget[0].name); + } + } + const project = await query(sqlClientPool, projectSql.selectProjectById(pol.pid)) + for (const target of targets) { + if (pol.updatePolicy) { + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(pol.rpid) + // add or update harbor 
policy to project in the remote cluster + const event: HarborRetentionMessage = { + type: HarborRetentionMessageType.HarborRetentionPolicy, + eventType: HarborRetentionEventType.UpdatePolicy, + data: { + project: { + id: project[0].id, + name: project[0].name, + }, + policy: retpol.configuration + } + } + const data = { + target: target, + event: event + } + await createRetentionPolicyTask({ key: 'harborpolicy:update', data }); + } + if (pol.removePolicy) { + // remove harbor policy from project in the remote cluster + const event: HarborRetentionMessage = { + type: HarborRetentionMessageType.HarborRetentionPolicy, + eventType: HarborRetentionEventType.RemovePolicy, + data: { + project: { + id: project[0].id, + name: project[0].name, + } + } + } + const data = { + target: target, + event: event + } + await createRetentionPolicyTask({ key: 'harborpolicy:update', data }); + } + } + } + } + } + return { + updateProjects, + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/helpers.ts b/services/api/src/resources/retentionpolicy/helpers.ts new file mode 100644 index 0000000000..fa8d8efae9 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/helpers.ts @@ -0,0 +1,387 @@ +import * as R from 'ramda'; +import { Pool } from 'mariadb'; +import { asyncPipe } from '@lagoon/commons/dist/util/func'; +import { query } from '../../util/db'; +import { Sql } from './sql'; +import { Sql as organizationSql } from '../organization/sql'; +import { Sql as projectSql } from '../project/sql'; +import { logger } from '../../loggers/logger'; +import { Helpers as projectHelpers } from '../project/helpers'; +import { HarborRetentionEnforcer } from './harbor'; + +export const Helpers = (sqlClientPool: Pool) => { + const getRetentionPolicy = async (id: number) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPolicyById(id)); + return R.prop(0, rows); + }; + const getRetentionPolicyByName = async (name: string) => { + const rows = 
await query(sqlClientPool, Sql.selectRetentionPolicyByName(name)); + return R.prop(0, rows); + }; + const getRetentionPolicyByTypeAndLink = async (type: string, sid: number, scope: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, sid, scope)); + return R.prop(0, rows); // ? R.prop(0, rows) : null; + }; + const getRetentionPoliciesByTypePolicyIDAndLink = async (type: string, policyId: number, sid: number, scope: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypePolicyIDAndLink(type, policyId, sid, scope)); + return rows; + }; + const getRetentionPoliciesByProjectWithType = async (type: string, project: number) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, project, "project")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(project, "project")); + } + return rows; + }; + const getRetentionPoliciesByOrganizationWithType = async (type: string, organization: number) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, organization, "organization")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(organization, "organization")); + } + return rows; + }; + const getRetentionPoliciesByGlobalWithType = async (type: string) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, 0, "global")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(0, "global")); + } + return rows; + }; + const getRetentionPoliciesByScopeWithTypeAndLink = async (type: string, scope: string, scopeId: number) => { + let rows, gr, or, pr, orgRows = [] + const globalRows = await getRetentionPoliciesByGlobalWithType(type); + switch (scope) { + case "project": + const projectData = await 
projectHelpers(sqlClientPool).getProjectById(scopeId) + orgRows = await getRetentionPoliciesByOrganizationWithType(type, projectData.organization); + const pRows = await getRetentionPoliciesByProjectWithType(type, scopeId); + gr = globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + or = orgRows.map(row => ({ ...row, source: "organization", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + pr = pRows.map(row => ({ ...row, source: "project", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + if (pr.length == 0) { + rows = gr.filter(ar => !or.find(rm => (rm.type === ar.type ) )) + if (or.length != 0) { + rows = or.filter(ar => !pr.find(rm => (rm.type === ar.type ) )) + } + } else { + return pr + } + rows.push(...pr) + return rows; + case "organization": + orgRows = await getRetentionPoliciesByOrganizationWithType(type, scopeId); + gr = globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + or = orgRows.map(row => ({ ...row, source: "organization", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + rows = gr.filter(ar => !or.find(rm => (rm.type === ar.type ) )) + rows.push(...or) + return rows; + case "global": + return globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + default: + throw new Error( + `No matching scope` + ); + } + }; + const getDeployTargetsForRetentionPoliciesByProject = async (project: number) => { + const rows = await query(sqlClientPool, Sql.selectDeployTargetsForRetentionByProject(project)); + return rows; + }; + const getEnvironmentsForRetentionPoliciesByProject = async (project: number) => { + const rows = await query(sqlClientPool, Sql.selectEnvironmentsForRetentionByProject(project)); + return rows; + }; + /* + getProjectIdsForAssociatedPolicyID retrieves all project ids 
that have the associated policyid and type attached either globally, organizationally, or directly in the project + this is used to quickly figure out which projects need to be updated if the associated policy is modified + the data this generates should be in the format the `policyEnforcer` requires, see `policyEnforcer` for details + */ + const getProjectIdsForAssociatedPolicyID = async (type: string, policyId: number, removal: boolean) => { + let policyOverrides = [] // store all the policy overrides that this function will generate + let projects = [] // store all the collected project ids so that we can use it to select other projects not in this list later on + // this policy is applied globally, so check for any organizations or projects that may use this policy + // check if any organizations have a policy that is different to this updated policy, these should be excluded from receiving any updates + const oids = await query(sqlClientPool, Sql.selectScopeIDsByRetentionPolicyTypeExcludingPolicyID(type, "organization", policyId)); + if (oids.length != 0 && oids[0]["scopeIds"] != null) { + for (const oid of oids[0]["scopeIds"].split(',')) { + const opids = await query(sqlClientPool, organizationSql.selectOrganizationProjectIds(oid)) + if (opids[0]["projectIds"] != null) { + for (const pid of opids[0]["projectIds"].split(',')) { + projects.push(pid) + const d = await getRetentionPolicyByTypeAndLink(type, oid, "organization") + if (removal && d && d.id == policyId) { + const targetIndex = policyOverrides.findIndex(f=>f.pid===pid); + const policy = {pid: pid, updatePolicy: true, rpid: d.id} + if (targetIndex != -1) { + // if the project already exists in the overrides, but a change is determined to be made + // update the project with the new policy + policyOverrides[targetIndex] = policy; + } else { + // otherwise add the project and policy as a new item + policyOverrides.push(policy) + } + } + } + } + } + } + // check if any projects have a policy that is 
different to this updated policy, these should be excluded from receiving any updates + const pids = await query(sqlClientPool, Sql.selectScopeIDsByRetentionPolicyTypeExcludingPolicyID(type, "project", policyId)); + if (pids.length != 0 && pids[0]["scopeIds"] != null) { + for (const pid of pids[0]["scopeIds"].split(',')) { + projects.indexOf(pid) === -1 && projects.push(pid); + const d = await getRetentionPolicyByTypeAndLink(type, pid, "project") + if (removal && d && d.id == policyId) { + const targetIndex = policyOverrides.findIndex(f=>f.pid===pid); + const policy = {pid: pid, updatePolicy: true, rpid: d.id} + if (targetIndex != -1) { + // if the project already exists in the overrides, but a change is determined to be made + // update the project with the new policy + policyOverrides[targetIndex] = policy; + } else { + // otherwise add the project and policy as a new item + policyOverrides.push(policy) + } + } + } + } + // select all project ids that don't have a policy override + const updateProjects = await query(sqlClientPool, projectSql.selectAllProjectIDsNotIn(projects)) + if (updateProjects[0]["projectIds"] != null) { + const projects = updateProjects[0]["projectIds"].split(','); + for (const pid of projects) { + if (removal) { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: pid, removePolicy: true}) + } else { + policyOverrides.push({pid: pid, updatePolicy: true, rpid: policyId}) + } + } + } + // all of these project ids should get an update as long as the policy type requires immediate update changes + return policyOverrides + } + /* + getRetentionPolicyChangesRequiredByScope generates a list of project ids and the associated policy id that should be attached to this project + the data this generates should be in the format the `policyEnforcer` requires, see `policyEnforcer` for 
details + */ + const getRetentionPolicyChangesRequired = async (scopeId: number, scope: string, type: string, policyId: number, removal: boolean) => { + const globPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, "global", 0) + let policyOverrides = [] // projects with override policies + switch (scope) { + case "global": + const projects = await getProjectIdsForAssociatedPolicyID(type, policyId, removal) + for (const p of projects) { + policyOverrides.push(p) + } + break; + case "organization": + const orgProjects = await query(sqlClientPool, organizationSql.selectOrganizationProjects(scopeId)) + let skip = false + for (const p of orgProjects) { + const pRetPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, "project", p.id) + for (const rp of pRetPols) { + skip = true + switch (rp.source) { + case "global": + // if this policy is being removed from an organization, and there is a global policy that can be applied + // set that here + if (removal) { + if (rp.configuration.enabled) { + policyOverrides.push({pid: p.id, updatePolicy: true, rpid: rp.id}) + } else { + policyOverrides.push({pid: p.id, removePolicy: true}) + } + } + break; + case "organization": + // if this policy is being added to an organization, and there is a an organization policy that can be applied + // set that here + if (!removal) { + if (rp.configuration.enabled) { + policyOverrides.push({pid: p.id, updatePolicy: true, rpid: rp.id}) + } else { + policyOverrides.push({pid: p.id, removePolicy: true}) + } + } + break; + case "project": + // do nothing if the project has an override for the project, as it takes precedence + break; + } + } + if (!skip) { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: p.id, removePolicy: true}) + } + skip = false + } + break; + case "project": + let policyToApply = null + 
const projectData = await projectHelpers(sqlClientPool).getProjectById(scopeId) + const orgRows = await getRetentionPoliciesByOrganizationWithType(type, projectData.organization); + const pRetPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, "project", scopeId) + if (pRetPols.length == 1) { + policyToApply = pRetPols[0] + } else { + if (orgRows.length == 1) { + policyToApply = orgRows[0] + } else { + if (globPols.length == 1) { + // apply the global polcy + policyToApply = globPols[0] + } + } + } + // if there is a policy to apply, and it is enabled, enable it here + if (policyToApply && policyToApply.configuration.enabled) { + policyOverrides.push({pid: scopeId, updatePolicy: true, rpid: policyToApply.id}) + } else { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: scopeId, removePolicy: true}) + } + break; + default: + throw new Error( + `No matching scope` + ); + } + return policyOverrides + } + const postRetentionPolicyUpdateHook = async (type: string, policyId: number, policyChanges: any, removal: boolean = false) => { + // retrieve all projects that need to be updated if a change in the policy is made + // not all policies will require immediate updates, but those that do will be done here + if (!policyChanges) { + policyChanges = await getProjectIdsForAssociatedPolicyID(type, policyId, removal) + } + await policyEnforcer(policyChanges, type) + } + // this hook can be used to perform actions when a policy is added to or removed from a scope + // depending on the scope + const postRetentionPolicyLinkHook = async (scopeId: number, scope: string, type: string, policyId: number, removal: boolean = false) => { + const policyChanges = await getRetentionPolicyChangesRequired(scopeId, scope, type, policyId, removal) + await policyEnforcer(policyChanges, type) + } + /* + policyEnforcer 
is the actual policy enforcement function, it will handle execution of policy changes that are required, if they are required. + the payload of `policyChanges` is as follows + [ + {pid: project.id, removePolicy: true}, + {pid: project.id, updatePolicy: true, rpid: policy.id} + ] + `removePolicy` indicates that any policies on this project of the requested type should be removed from this project + `updatePolicy` indicates that a policy of the requested type should be applied to this project + the post retention hooks (postRetentionPolicyLinkHook and postRetentionPolicyUpdateHook) will call policyEnforcer based on which resolver + called the hook (addRetentionPolicyLink, updateRetentionPolicy, removeRetentionPolicyLink) + */ + const policyEnforcer =async (policyChanges: any, type: string) => { + switch (type) { + case "harbor": + // send this to the harbor retention policy enforcer + await HarborRetentionEnforcer().updateProjects(sqlClientPool, policyChanges) + break; + case "history": + // do nothing, history changes are executed when deployment or task history is modified + // so policy updates are implemented in realtime + break; + default: + throw new Error( + `No matching type` + ); + } + } + // this hook can be used after a deployment has been updated to perform changes to any retention policies as required + const postDeploymentProjectPolicyHook = async (projectId: number, status: string ) => { + switch (status) { + case 'complete': + case 'failed': + case 'cancelled': + const rows = await getRetentionPoliciesByScopeWithTypeAndLink("harbor", "project", projectId); + if (rows[0]) { + // if a deployment is complete, cancelled, or fails, run the postretentionpolicylinkhook so that + // any harbor policies are applied to the new environment or project as required + // this is done just in case the project or environment was created AFTER the policy was created to ensure that it gets any updates + // additionally, it happens here rather than at project creation as 
there may be no harbor project at the time the project is created + await postRetentionPolicyLinkHook(projectId, "project", rows[0].type, rows[0].id, !rows[0].configuration.enabled) + } + break; + default: + break; + } + } + return { + getRetentionPolicy, + getRetentionPolicyByName, + getRetentionPoliciesByProjectWithType, + getRetentionPoliciesByOrganizationWithType, + getRetentionPoliciesByGlobalWithType, + getDeployTargetsForRetentionPoliciesByProject, + getEnvironmentsForRetentionPoliciesByProject, + getRetentionPolicyByTypeAndLink, + getRetentionPoliciesByTypePolicyIDAndLink, + getProjectIdsForAssociatedPolicyID, + getRetentionPolicyChangesRequired, + postRetentionPolicyLinkHook, + postRetentionPolicyUpdateHook, + policyEnforcer, + getRetentionPoliciesByScopeWithTypeAndLink, + postDeploymentProjectPolicyHook, + deleteRetentionPolicy: async (id: number) => { + // check for globals with this retention policy + const globals = await query( + sqlClientPool, Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "global") + ); + if (globals.length > 0) { + throw new Error( + 'Unable to delete retention policy, it is in use globally and should be removed from global consumption first' + ); + } + + // check for organizations with this retention policy + const orgs = await query( + sqlClientPool, Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "organization") + ); + if (orgs.length > 0) { + throw new Error( + 'Unable to delete retention policy, there are organizations using it that should be removed from first' + ); + } + + // check for organizations with this retention policy + const projects = await query( + sqlClientPool, Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "project") + ); + if (projects.length > 0) { + throw new Error( + 'Unable to delete retention policy, there are projects using it that should be removed from first' + ); + } + await query( + sqlClientPool, + Sql.deleteRetentionPolicy(id) + ); + }, + updateRetentionPolicy: async (id: number, patch: any) 
=> { + await query( + sqlClientPool, + Sql.updateRetentionPolicy({ + id: id, + patch: patch + }) + ); + } + }; +}; diff --git a/services/api/src/resources/retentionpolicy/history.ts b/services/api/src/resources/retentionpolicy/history.ts new file mode 100644 index 0000000000..12c1bd0445 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/history.ts @@ -0,0 +1,207 @@ + + +import { Helpers } from './helpers'; +import { sqlClientPool } from '../../clients/sqlClient'; +import { Sql as deploymentSql } from '../deployment/sql'; +import { Sql as taskSql } from '../task/sql'; +import { + sendToLagoonActions, + // @ts-ignore +} from '@lagoon/commons/dist/tasks'; +import { query } from '../../util/db'; +import { logger } from '../../loggers/logger'; + +export const HistoryRetentionEnforcer = () => { + const cleanupTask = async (projectData: any, environmentData: any, task: any) => { + // clean up the task log history and associated files from S3 + const actionData = { + type: "retentionCleanup", + eventType: "taskCleanup", + data: { + environmentName: environmentData.name, + environmentId: environmentData.id, + projectName: projectData.name, + projectId: projectData.id, + task: { + id: task.id.toString(), + }, + remoteId: task.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + const cleanupTasks = async (projectData: any, environmentData: any) => { + // basic clean up all but X latest tasks + const retpol = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink("history", "project", projectData.id) + if (retpol.length > 0) { + const c = retpol[0].configuration + if (c.enabled) { + let historyToDelete = [] + switch (c.taskType) { + case "count": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetention(environmentData.id, c.taskHistory)); + break; + case "days": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetentionDays(environmentData.id, c.taskHistory)); + break; + case 
"months": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetentionMonths(environmentData.id, c.taskHistory)); + break; + } + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "taskCleanup", + data: { + environmentName: environmentData.name, + environmentId: environmentData.id, + projectName: projectData.name, + projectId: projectData.id, + task: { + id: r.id.toString(), + }, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + switch (c.taskType) { + case "count": + await query(sqlClientPool, taskSql.deleteTaskHistory(environmentData.id, c.taskHistory)); + break; + case "days": + await query(sqlClientPool, taskSql.deleteTaskHistoryDays(environmentData.id, c.taskHistory)); + break; + case "months": + await query(sqlClientPool, taskSql.deleteTaskHistoryMonths(environmentData.id, c.taskHistory)); + break; + } + } + } + } + } + const cleanupDeployment = async (projectData: any, environmentData: any, deployment: any) => { + // clean up the deployment log history and associated files from S3 + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: deployment.name, + remoteId: deployment.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + const cleanupDeployments = async (projectData: any, environmentData: any) => { + // basic clean up all but X latest tasks + const retpol = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink("history", "project", projectData.id) + if (retpol.length > 0) { + const c = retpol[0].configuration + if (c.enabled) { + let historyToDelete = [] + switch (c.taskType) { + case 
"count": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetention(environmentData.id, c.deploymentHistory)); + break; + case "days": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetentionDays(environmentData.id, c.deploymentHistory)); + break; + case "months": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetentionMonths(environmentData.id, c.deploymentHistory)); + break; + } + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + switch (c.taskType) { + case "count": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistory(environmentData.id, c.deploymentHistory)); + break; + case "days": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryDays(environmentData.id, c.deploymentHistory)); + break; + case "months": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryMonths(environmentData.id, c.deploymentHistory)); + break; + } + } + } + } + } + const cleanupAllDeployments = async (projectData: any, environmentData: any) => { + // get all the environment deployment history + const historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryForEnvironment(environmentData.id)); + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + 
projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + // delete all the environment deployment history + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryForEnvironment(environmentData.id)); + } + } + const cleanupAllTasks = async (projectData: any, environmentData: any) => { + // get all the environment task history + const historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryForEnvironment(environmentData.id)); + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + // delete all the environment task history + await query(sqlClientPool, taskSql.deleteTaskHistoryForEnvironment(environmentData.id)); + } + } + return { + cleanupDeployment, + cleanupDeployments, + cleanupTask, + cleanupTasks, + cleanupAllDeployments, + cleanupAllTasks, + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/resolvers.ts b/services/api/src/resources/retentionpolicy/resolvers.ts new file mode 100644 index 0000000000..cec63b151b --- /dev/null +++ b/services/api/src/resources/retentionpolicy/resolvers.ts @@ -0,0 +1,394 @@ + +import * as R from 'ramda'; +import { ResolverFn } from '..'; +import { logger } from '../../loggers/logger'; +import { isPatchEmpty, query, knex } from '../../util/db'; +import { Helpers } from './helpers'; +import { RetentionPolicy } from './types'; +import { 
Helpers as organizationHelpers } from '../organization/helpers'; +import { Helpers as projectHelpers } from '../project/helpers'; +import { Sql } from './sql'; + +export const createRetentionPolicy: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'add'); + + if (input.id) { + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (retpol) { + throw new Error( + `Retention policy with ID ${input.id} already exists` + ); + } + } + + // @ts-ignore + if (!input.type) { + throw new Error( + 'Must provide type' + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByName(input.name) + if (retpol) { + throw new Error( + `Retention policy with name ${input.name} already exists` + ); + } + + // convert the type to the configuration json on import after passing through the validator + try { + input.configuration = await RetentionPolicy().returnValidatedConfiguration(input.type, input) + } catch (e) { + throw new Error( + `${e}` + ); + } + + const { insertId } = await query( + sqlClientPool, + Sql.createRetentionPolicy({ + ...input, + })); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(insertId); + + userActivityLogger(`User created a retention policy`, { + project: '', + event: 'api:createRetentionPolicy', + payload: { + patch: { + name: input.name, + configuration: input.configuration, + }, + data: row + } + }); + + + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; + // return row; +}; + +export const updateRetentionPolicy: ResolverFn = async ( + root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'update'); + + if (isPatchEmpty(input)) { + throw new Error('input.patch requires at least 1 attribute'); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (!retpol) { + throw 
new Error( + `Retention policy does not exist` + ); + } + + let patch = { + name: input.patch.name + } + + if (!input.patch[retpol.type]) { + throw new Error( + `Missing configuration for type ${retpol.type}, patch not provided` + ); + } + + // convert the type to the configuration json on import after passing through the validator + try { + patch["configuration"] = await RetentionPolicy().returnValidatedConfiguration(retpol.type, input.patch) + } catch (e) { + throw new Error( + `${e}` + ); + } + + await Helpers(sqlClientPool).updateRetentionPolicy(input.id, patch); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + + userActivityLogger(`User updated retention policy`, { + project: '', + event: 'api:updateRetentionPolicy', + payload: { + patch: patch, + data: row + } + }); + + if (retpol.configuration != row.configuration) { + // if a policy is updated, and the configuration is not the same as before the update + // then run postRetentionPolicyUpdateHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + const policyEnabled = input.patch[retpol.type].enabled + await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, null, !policyEnabled) + } + + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; + // return row; +}; + +export const deleteRetentionPolicy: ResolverFn = async ( + _root, + { id: rid }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'delete'); + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(rid) + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + await Helpers(sqlClientPool).deleteRetentionPolicy(rid); + + userActivityLogger(`User deleted a retention policy '${retpol.name}'`, { + project: '', + event: 'api:deleteRetentionPolicy', + payload: { + input: { + retentionPolicy: rid + } + } + }); + + return 'success'; +}; 
+ +export const listRetentionPolicies: ResolverFn = async ( + root, + { type, name }, + { sqlClientPool, hasPermission } +) => { + await hasPermission('retention_policy', 'viewAll'); + + let queryBuilder = knex('retention_policy'); + if (type) { + queryBuilder = queryBuilder.and.where('type', type); + } + + if (name) { + queryBuilder = queryBuilder.where('name', name); + } + + const rows = await query(sqlClientPool, queryBuilder.toString()); + return rows.map(row => ({ ...row, source: null, configuration: {type: row.type, ...JSON.parse(row.configuration)} })); +}; + + +export const addRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + + let scopeId = 0 + switch (input.scope) { + case "global": + await hasPermission('retention_policy', 'addGlobal'); + break; + case "organization": + const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) + if (!organization) { + throw new Error( + `Organization does not exist` + ); + } + await hasPermission('retention_policy', 'addOrganization'); + scopeId = organization.id + break; + case "project": + const project = await projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) + if (!project) { + throw new Error( + `Project does not exist` + ); + } + await hasPermission('retention_policy', 'addProject'); + scopeId = project.id + break; + default: + throw new Error( + `No matching scope` + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + const retpoltypes = await Helpers(sqlClientPool).getRetentionPolicyByTypeAndLink(retpol.type, scopeId, input.scope); + if (retpoltypes) { + throw new Error( + `A retention policy of type ${retpol.type} is already attached to the ${input.scope}` + ); + } + + await query( + sqlClientPool, + Sql.addRetentionPolicyLink( + input.id, + 
input.scope, + scopeId, + ) + ); + + // if a policy is linked to a scope (global, organization, project) + // then run postRetentionPolicyLinkHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + await Helpers(sqlClientPool).postRetentionPolicyLinkHook(scopeId, input.scope, retpol.type, retpol.id, false) + + userActivityLogger(`User added a retention policy '${retpol.name}' to ${input.scope}`, { + project: '', + event: 'api:addRetentionPolicyOrganization', + payload: { + input: { + retentionPolicy: retpol.id, + scope: input.scope, + scopeId: scopeId + } + } + }); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; +}; + +export const removeRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + let scopeId = 0 + switch (input.scope) { + case "global": + await hasPermission('retention_policy', 'addGlobal'); + break; + case "organization": + const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) + if (!organization) { + throw new Error( + `Organization does not exist` + ); + } + await hasPermission('retention_policy', 'addOrganization'); + scopeId = organization.id + break; + case "project": + const project = await projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) + if (!project) { + throw new Error( + `Project does not exist` + ); + } + await hasPermission('retention_policy', 'addProject'); + scopeId = project.id + break; + default: + throw new Error( + `No matching scope` + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + const retpoltypes = await Helpers(sqlClientPool).getRetentionPoliciesByTypePolicyIDAndLink(retpol.type, input.id, scopeId, 
input.scope); + if (retpoltypes.length == 0) { + throw new Error( + `No matching retention policy attached to this ${input.scope}` + ); + } + + let preDeleteProjectIds = [] + if (input.scope == "global") { + // this is calculated before the policies are removed, as it is used after removing the policy being + // passed into the post removal update hook if required, only for global scoped policies that are being unlinked + preDeleteProjectIds = await Helpers(sqlClientPool).getProjectIdsForAssociatedPolicyID(retpol.type, retpol.id, true) + } + + await query( + sqlClientPool, + Sql.deleteRetentionPolicyLink( + input.id, + input.scope, + scopeId, + ) + ); + + // if a policy is unlinked to a scope (global, organization, project) + // then run postRetentionPolicyLinkHook or postRetentionPolicyUpdateHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + if (input.scope != "global") { + // if this is a standard organization or project policy unlink, then handle that with the post retention policy link hook + // this hook knows how to check the change that impacts those two scopes + await Helpers(sqlClientPool).postRetentionPolicyLinkHook(scopeId, input.scope, retpol.type, retpol.id, true) + } else { + // global policy applications when they're remove require a different calculation step that will update + // projects that don't use any policy overrides, this is because the depth of reach of a global policy + // is a bit trickier to calculate + await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, preDeleteProjectIds, true) + } + + userActivityLogger(`User removed a retention policy '${retpol.name}' from organization`, { + project: '', + event: 'api:removeRetentionPolicyOrganization', + payload: { + input: { + retentionPolicy: retpol.id, + scope: input.scope, + scopeId: scopeId + } + } + }); + + return "success" +}; + +// This is only called by the project resolver, so there is no need to do any 
permission checks +export const getRetentionPoliciesByProjectId: ResolverFn = async ( + project, + args, + { sqlClientPool } +) => { + + let pid = args.project; + if (project) { + pid = project.id; + } + let rows = [] + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "project", project.id); + return rows; +}; + +// This is only called by the organization resolver, so there is no need to do any permission checks +export const getRetentionPoliciesByOrganizationId: ResolverFn = async ( + organization, + args, + { sqlClientPool } +) => { + + let oid = args.organization; + if (organization) { + oid = organization.id; + } + let rows = [] + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "organization", oid); + return rows; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/sql.ts b/services/api/src/resources/retentionpolicy/sql.ts new file mode 100644 index 0000000000..58ae419caf --- /dev/null +++ b/services/api/src/resources/retentionpolicy/sql.ts @@ -0,0 +1,123 @@ +import { knex } from '../../util/db'; + +export const Sql = { + updateRetentionPolicy: ({ id, patch }: { id: number, patch: { [key: string]: any } }) => { + const updatePatch = { + ...patch, + updated: knex.fn.now(), + }; + return knex('retention_policy') + .where('id', '=', id) + .update(updatePatch) + .toString(); + }, + selectRetentionPolicyById: (id: number) => + knex('retention_policy') + .where('id', '=', id) + .toString(), + selectRetentionPolicyByName: (name: string) => + knex('retention_policy') + .where('name', '=', name) + .toString(), + selectRetentionPoliciesByType: (type: string) => + knex('retention_policy') + .where('type', '=', type) + .toString(), + selectRetentionPoliciesByLink: (id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + 
.where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .toString(), + selectRetentionPoliciesByTypeAndLink: (type: string, id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .andWhere(knex.raw('rp.type = ?', type)) + .toString(), + selectRetentionPoliciesByTypePolicyIDAndLink: (type: string, policyId: number, id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .andWhere(knex.raw('rp.type = ?', type)) + .andWhere(knex.raw('rp.id = ?', policyId)) + .toString(), + selectRetentionPoliciesByLinkAndPolicyID: (id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('rp.id = ?', id)) + .toString(), + selectScopeIDsByRetentionPolicyTypeExcludingPolicyID: (type: string, scope: string, policyId: number) => + knex('retention_policy as rp') + .select(knex.raw('group_concat(rpr.id) as scope_ids')) + .join('retention_policy_reference as rpr', 'rp.id', '=', 'rpr.retention_policy') + .where(knex.raw('rpr.scope = ?', scope)) + .andWhere(knex.raw('rp.type = ?', type)) + .whereNot(knex.raw('rp.id = ?', policyId)) + .toString(), + selectScopeIDsByRetentionPolicyTypeIncludingPolicyID: (type: string, scope: string, policyId: number) => + knex('retention_policy as rp') + 
.select(knex.raw('group_concat(rpr.id) as scope_ids')) + .join('retention_policy_reference as rpr', 'rp.id', '=', 'rpr.retention_policy') + .where(knex.raw('rpr.scope = ?', scope)) + .andWhere(knex.raw('rp.type = ?', type)) + .andWhere(knex.raw('rp.id = ?', policyId)) + .toString(), + deleteRetentionPolicy: (id: number) => + knex('retention_policy') + .where('id', '=', id) + .delete() + .toString(), + deleteRetentionPolicyLink: (id: number, scope: string, sid: number) => + knex('retention_policy_reference') + .where('retention_policy', '=', id) + .andWhere('scope', '=', scope) + .andWhere('id', '=', sid) + .delete() + .toString(), + createRetentionPolicy: (input) => { + const { + id, + name, + type, + configuration + } = input; + return knex('retention_policy').insert({ + id, + name, + type, + configuration + }).toString(); + }, + addRetentionPolicyLink: (id: number, scope: string, sid: number) => { + return knex('retention_policy_reference').insert({ + retentionPolicy: id, + scope, + id: sid + }).toString(); + }, + selectDeployTargetsForRetentionByProject: (pid: number) => + knex('project as p') + .select('p.name', 'p.id as pid', 'p.organization', 'dt.id as dtid', 'dt.name as dtname') + .join('environment as e', 'p.id', '=', 'e.project') + .join('openshift as dt', 'dt.id', '=', 'e.openshift') + .where('e.deleted', '0000-00-00 00:00:00') + .andWhere(knex.raw('p.id = ?', pid)) + .groupBy('p.name', 'e.openshift') + .toString(), + selectEnvironmentsForRetentionByProject: (pid: number) => + knex('project as p') + .select('p.name', 'p.id as pid', 'e.name as ename', 'e.id as eid', 'p.organization', 'dt.id as dtid', 'dt.name as dtname') + .join('environment as e', 'p.id', '=', 'e.project') + .join('openshift as dt', 'dt.id', '=', 'e.openshift') + .where('e.deleted', '0000-00-00 00:00:00') + .andWhere(knex.raw('p.id = ?', pid)) + .toString(), +} \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/types.ts 
const convertHarborRetentionPolicyToJSON = async ( + harbor: HarborRetentionPolicy + ): Promise<string> => { + const c = JSON.stringify(harbor) + return c + }; + + const convertHistoryRetentionPolicyToJSON = async ( + history: HistoryRetentionPolicy + ): Promise<string> => { + const c = JSON.stringify(history) + return c + }; + + const convertJSONToHarborRetentionPolicy = async ( + configuration: string + ): Promise<HarborRetentionPolicy> => {
const convertJSONToHistoryRetentionPolicy = async ( + configuration: string + ): Promise<HistoryRetentionPolicy> => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.deploymentHistory != "number") { + throw new Error("deploymentHistory must be a number"); + } + if (typeof c.deploymentType != "string") { + throw new Error("deploymentType must be HistoryRetentionType"); + } + if (typeof c.taskHistory != "number") { + throw new Error("taskHistory must be a number"); + } + if (typeof c.taskType != "string") { + throw new Error("taskType must be HistoryRetentionType"); + } + return c + }; + + // run the configuration patches through the validation process + const returnValidatedConfiguration = async (type: string, patch: any): Promise<string> => { + const c = JSON.stringify(patch[type])
'../project/sql'; import { Sql as environmentSql } from '../environment/sql'; import { Helpers as environmentHelpers } from '../environment/helpers'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; import { logger } from '../../loggers/logger'; export const Helpers = (sqlClientPool: Pool, hasPermission) => { @@ -133,6 +134,9 @@ export const Helpers = (sqlClientPool: Pool, hasPermission) => { ); } + // pass to the HistoryRetentionEnforcer to clean up tasks based on any retention policies + await HistoryRetentionEnforcer().cleanupTasks(projectData, environmentData) + return taskData; }, addAdvancedTask: async ( diff --git a/services/api/src/resources/task/resolvers.ts b/services/api/src/resources/task/resolvers.ts index 4b7f5edb37..1d05e3ed03 100644 --- a/services/api/src/resources/task/resolvers.ts +++ b/services/api/src/resources/task/resolvers.ts @@ -18,6 +18,7 @@ import sha1 from 'sha1'; import { generateTaskName } from '@lagoon/commons/dist/util/lagoon'; import { sendToLagoonLogs } from '@lagoon/commons/dist/logs/lagoon-logger'; import { createMiscTask } from '@lagoon/commons/dist/tasks'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; const accessKeyId = process.env.S3_FILES_ACCESS_KEY_ID || 'minio' const secretAccessKey = process.env.S3_FILES_SECRET_ACCESS_KEY || 'minio123' @@ -329,8 +330,22 @@ export const deleteTask: ResolverFn = async ( project: R.path(['0', 'pid'], rows) }); + const task = await Helpers(sqlClientPool, hasPermission).getTaskByTaskInput({id: id}) + + if (!task) { + throw new Error( + `Invalid task input` + ); + } + + const environmentData = await environmentHelpers(sqlClientPool).getEnvironmentById(parseInt(task.environment)); + const projectData = await projectHelpers(sqlClientPool).getProjectById(environmentData.project); + await query(sqlClientPool, Sql.deleteTask(id)); + // pass the task to the HistoryRetentionEnforcer + await HistoryRetentionEnforcer().cleanupTask(projectData, 
selectTaskHistoryRetentionDays: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created <= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain months value + selectTaskHistoryRetentionMonths: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created <= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(),
deleteTaskHistoryDays: (environment: number, retain: number) => + knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created <= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteTaskHistoryMonths: (environment: number, retain: number) => + knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created <= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(),
+ """ + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } input AddOrganizationInput { @@ -1153,6 +1162,11 @@ const typeDefs = gql` groups: [OrgGroupInterface] groupCount: Int notifications: [OrganizationNotification] + """ + retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization + if the project is associated to an organization, and the organization has any retention policies + """ + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } """ @@ -1450,6 +1464,7 @@ const typeDefs = gql` getProjectGroupOrganizationAssociation(input: ProjectOrgGroupsInput!): String @deprecated(reason: "Use checkBulkImportProjectsAndGroupsToOrganization instead") getEnvVariablesByProjectEnvironmentName(input: EnvVariableByProjectEnvironmentNameInput!): [EnvKeyValue] checkBulkImportProjectsAndGroupsToOrganization(input: AddProjectToOrganizationInput!): ProjectGroupsToOrganization + listRetentionPolicies(type: RetentionPolicyType, name: String): [RetentionPolicy] } type ProjectGroupsToOrganization { @@ -2302,6 +2317,167 @@ const typeDefs = gql` name: String } + """ + RetentionPolicyType is the types of retention policies supported in Lagoon + """ + enum RetentionPolicyType { + HARBOR + HISTORY + } + + """ + HarborRetentionPolicy is the type for harbor retention policies + """ + type HarborRetentionPolicy { + enabled: Boolean + rules: [HarborRetentionRule] + schedule: String + } + type HarborRetentionRule { + name: String + """ + Pattern is based on doublestar path pattern matching and globbing (harbor uses this) + Example, '[^pr\\-]*/*' to exclude pullrequests in a pattern, and 'pr-*' to only match pullrequest environments + https://github.com/bmatcuk/doublestar#patterns + """ + pattern: String + latestPulled: Int + } + + """ + HarborRetentionPolicyInput is the input for a HarborRetentionPolicy + """ + input HarborRetentionPolicyInput { + enabled: Boolean! 
HistoryRetentionType is the type of interval used by history retention policies in Lagoon
lists the available retention policies, and is used to indicate if a project is consuming a retention policy from the project directly
+ patch: UpdateRetentionPolicyPatchInput + } + + """ + RetentionPolicyScope is the types of retention policies scopes in Lagoon + """ + enum RetentionPolicyScope { + GLOBAL + ORGANIZATION + PROJECT + } + + """ + AddRetentionPolicyLinkInput is used as the input for associating a retention policy with a scope + """ + input AddRetentionPolicyLinkInput { + id: Int! + scope: RetentionPolicyScope! + scopeName: String + } + type Mutation { """ Add Environment or update if it is already existing @@ -2530,6 +2706,26 @@ const typeDefs = gql` bulkImportProjectsAndGroupsToOrganization(input: AddProjectToOrganizationInput, detachNotification: Boolean): ProjectGroupsToOrganization addOrUpdateEnvironmentService(input: AddEnvironmentServiceInput!): EnvironmentService deleteEnvironmentService(input: DeleteEnvironmentServiceInput!): String + """ + Create a retention policy + """ + createRetentionPolicy(input: AddRetentionPolicyInput!): RetentionPolicy + """ + Update a retention policy + """ + updateRetentionPolicy(input: UpdateRetentionPolicyInput!): RetentionPolicy + """ + Delete a retention policy + """ + deleteRetentionPolicy(id: Int!): String + """ + Add an existing retention policy to a resource type + """ + addRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): RetentionPolicy + """ + Remove an existing retention policy from a resource type + """ + removeRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): String } type Subscription { diff --git a/services/logs2notifications/main.go b/services/logs2notifications/main.go index 12225d1f10..f45ec5df89 100644 --- a/services/logs2notifications/main.go +++ b/services/logs2notifications/main.go @@ -95,15 +95,15 @@ func main() { flag.BoolVar(&disableS3, "disable-s3", false, "Disable the logs2s3 feature.") flag.StringVar(&s3FilesAccessKeyID, "s3-files-access-key", "minio", - "The jwt audience.") + "The S3 files access key.") flag.StringVar(&s3FilesSecretAccessKey, "s3-files-secret-access-key", "minio123", - "The jwt 
audience.") + "The S3 files secret access key.") flag.StringVar(&s3FilesBucket, "s3-files-bucket", "lagoon-files", - "The jwt audience.") + "The S3 files bucket.") flag.StringVar(&s3FilesRegion, "s3-files-region", "auto", - "The jwt audience.") + "The S3 files region.") flag.StringVar(&s3FilesOrigin, "s3-files-origin", "http://minio.127.0.0.1.nip.io:9000", - "The jwt audience.") + "The S3 files origin.") flag.BoolVar(&s3isGCS, "s3-google-cloud", false, "If the storage backend is google cloud.")