# .gitlab-ci.yml

stages:
  - package
  - deploy
  - benchmarks
  - benchmarks-pr-comment

include:
  - remote: https://gitlab-templates.ddbuild.io/apm/packaging.yml
  - local: ".gitlab/benchmarks.yml"
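
# Note: the remote packaging template presumably defines the `.package` base
# job that the `package` job below extends; .gitlab/benchmarks.yml supplies
# the jobs for the two benchmarks stages.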

variables:
  DOWNSTREAM_BRANCH:
    value: "master"
    description: "Run a specific datadog-reliability-env branch downstream"
  DOWNSTREAM_MBP_BRANCH:
    value: "dd-trace-py"
    description: "Run a specific relenv-microbenchmarking-platform branch downstream"
  PYTHON_PACKAGE_VERSION:
    description: "Version to build for .deb and .rpm. Must already be published on PyPI"
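
# Because these variables carry a `description`, GitLab pre-fills them in the
# "Run pipeline" form, so each can be overridden for a manual run.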

.common: &common
  tags: [ "runner:main", "size:large" ]
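
# The `common` anchor is not referenced elsewhere in this file; a job could
# opt into the shared runner tags by merging it in. A minimal sketch (the job
# name is hypothetical):
#
#   example_job:
#     <<: *common
#     script:
#       - echo "runs on a large runner from the main pool"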

package:
  extends: .package
  rules:
    - if: $PYTHON_PACKAGE_VERSION
      when: on_success
    - if: '$CI_COMMIT_TAG =~ /^v.*/'
      when: on_success
  script:
    - ../.gitlab/build-deb-rpm.sh
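
# A package build for an already-released version can also be started without
# pushing a tag, by setting PYTHON_PACKAGE_VERSION on a new pipeline. A sketch
# using the GitLab pipeline-trigger API (host, project ID, token, and version
# are placeholders, not values from this repo):
#
#   curl --request POST \
#     --form "token=$TRIGGER_TOKEN" \
#     --form "ref=master" \
#     --form "variables[PYTHON_PACKAGE_VERSION]=1.2.3" \
#     "https://gitlab.example.com/api/v4/projects/<project-id>/trigger/pipeline"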

.release-package:
  stage: deploy
  variables:
    PRODUCT_NAME: auto_inject-python

deploy_to_reliability_env:
  stage: deploy
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: on_success
    - when: manual
      allow_failure: true
  trigger:
    project: DataDog/apm-reliability/datadog-reliability-env
    branch: $DOWNSTREAM_BRANCH
  variables:
    UPSTREAM_PROJECT_ID: $CI_PROJECT_ID
    UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME
    UPSTREAM_BRANCH: $CI_COMMIT_REF_NAME
    UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA
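
# The UPSTREAM_* variables above are forwarded to the downstream
# datadog-reliability-env pipeline, presumably so it can check out and report
# against the exact branch and commit that triggered it.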

deploy_to_di_backend:automatic:
  stage: deploy
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: never
    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
      when: on_success
  trigger:
    project: DataDog/debugger-demos
    branch: main
  variables:
    UPSTREAM_PROJECT_ID: $CI_PROJECT_ID
    UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME
    UPSTREAM_COMMIT_SHORT_SHA: $CI_COMMIT_SHORT_SHA
    UPSTREAM_PIPELINE_ID: $CI_PIPELINE_ID
    UPSTREAM_TAG: $CI_COMMIT_TAG
    UPSTREAM_PACKAGE_JOB: build

deploy_to_di_backend:manual:
  stage: deploy
  rules:
    - when: manual
      allow_failure: true
  trigger:
    project: DataDog/debugger-demos
    branch: main
  variables:
    UPSTREAM_PROJECT_ID: $CI_PROJECT_ID
    UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME
    UPSTREAM_COMMIT_SHORT_SHA: $CI_COMMIT_SHORT_SHA
    UPSTREAM_PIPELINE_ID: $CI_PIPELINE_ID
    UPSTREAM_COMMIT_AUTHOR: $CI_COMMIT_AUTHOR
    UPSTREAM_TAG: $CI_COMMIT_TAG
    UPSTREAM_PACKAGE_JOB: build

deploy_to_docker_registries:
  stage: deploy
  rules:
    - if: '$POPULATE_CACHE'
      when: never
    - if: '$CI_COMMIT_TAG =~ /^v.*/'
      when: on_success
    - when: manual
      allow_failure: true
  trigger:
    project: DataDog/public-images
    branch: main
    strategy: depend
  variables:
    IMG_SOURCES: ghcr.io/datadog/dd-trace-py/dd-lib-python-init:$CI_COMMIT_TAG
    IMG_DESTINATIONS: dd-lib-python-init:$CI_COMMIT_TAG
    IMG_SIGNING: "false"
    # Wait 4 hours before triggering the downstream job.
    # This is a workaround, since there is no way to trigger
    # GitLab from the GitHub workflow (build_deploy.yml:upload_pypi).
    #
    # The caveat is that if the upload to PyPI fails and is not fixed
    # within the retry window, this job fails and the images are not
    # published.
    RETRY_DELAY: 14400
    RETRY_COUNT: 3
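
# Assuming RETRY_DELAY is in seconds, the settings above give the downstream
# job a window of up to roughly 12 hours (3 retries x 4 hours) for the PyPI
# release to become available.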

deploy_latest_tag_to_docker_registries:
  stage: deploy
  rules:
    - if: '$POPULATE_CACHE'
      when: never
    - if: '$CI_COMMIT_TAG =~ /^v.*/'
      when: on_success
    - when: manual
      allow_failure: true
  trigger:
    project: DataDog/public-images
    branch: main
    strategy: depend
  variables:
    IMG_SOURCES: ghcr.io/datadog/dd-trace-py/dd-lib-python-init:$CI_COMMIT_TAG
    IMG_DESTINATIONS: dd-lib-python-init:latest
    IMG_SIGNING: "false"
    # See the note in the `deploy_to_docker_registries` job above.
    RETRY_DELAY: 14400
    RETRY_COUNT: 3
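
# Taken together, for a hypothetical release tag v2.1.0 the two deploy jobs
# publish ghcr.io/datadog/dd-trace-py/dd-lib-python-init:v2.1.0 to the public
# registries twice: once as dd-lib-python-init:v2.1.0 and once as
# dd-lib-python-init:latest.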