# .travis.yml
language: scala
scala: 2.12.4 # this needs to be in sync with the Scala version used in .appveyor.yml
jdk: oraclejdk8
dist: trusty
sudo: required

services:
  - docker
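# docker is needed to run the metastore and connector containers that the
# integration tests in the test stage below target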
stages:
  - name: clean
    if: NOT type = pull_request
  - name: compile
  - name: unit test
  - name: test
  - name: publish
    # weirdly, we have to add the extra PR check
    if: NOT type = pull_request
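# stages run sequentially in the order listed; the `if` conditions skip the
# clean and publish stages on pull request builds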
# this all goes in the test stage
env:
  matrix:
    # - CONNECTOR=couchbase
    - CONNECTOR=marklogic_json
    - CONNECTOR=marklogic_xml
    - CONNECTOR=mimir
    - CONNECTOR=mongodb_3_2
    - CONNECTOR=mongodb_3_4
    - CONNECTOR=mongodb_3_4_13
    - CONNECTOR=mongodb_3_6
    - CONNECTOR=mongodb_read_only
    - CONNECTOR=postgres
    # - CONNECTOR=spark_local_test # no spark for 2.12
    # - CONNECTOR=spark_hdfs       # no spark for 2.12
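  # each CONNECTOR value above expands to its own test-stage job, so every
  # backend's integration suite runs as a separate build job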
  global:
    - COURSIER_PROGRESS=0
    - SBT=./sbt
    # DISCORD_WEBHOOK_TOKENS
    - secure: "copZbrzCXeRfTx2ZRM6nsM07A4rfQAZ0mqk4R+z6USJhjLHv+KXAN30QN2nIAi9FmLoEX8BZRRpbX36nkc81jverhbS6POaci5UcQLpo8HOX4yR3b/pWqIBlno55K5hC6UCEXAneI/ZThFKwotpME9dw7ww+S+gIyaoDvZc4OUI="
# this is also the test stage :eyeroll:
script:
  - set -e
  # travis installs postgres by default; stopping it frees up the psql port
  # needed by the postgresql container
  - sudo /etc/init.d/postgresql stop
  - sudo /etc/init.d/mysql stop
  - sudo service memcached stop
  - docker/scripts/setupContainers -u quasar_metastore
  - docker/scripts/setupContainers -u quasar_$CONNECTOR
  - docker ps
  # populate the it/testing.conf file
  - docker/scripts/assembleTestingConf -c quasar_metastore
  - docker/scripts/assembleTestingConf -i quasar_$CONNECTOR
  - cat it/testing.conf
  - ./sbt preBuild
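  # choose the connector-specific integration test target to delegate to,
  # based on which CONNECTOR this matrix job is running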
  - |-
    SPECIFIC_DELEGATE=
    case $CONNECTOR in
      couchbase)   SPECIFIC_DELEGATE="couchbaseIt/testOnly -- xonly failtrace" ;;
      marklogic_*) SPECIFIC_DELEGATE="marklogicIt/testOnly -- xonly failtrace" ;;
      mongodb_*)   SPECIFIC_DELEGATE="mongoIt/testOnly -- xonly failtrace" ;;
      spark_hdfs)
        ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION \
          'set every assemblyJarName in assembly := "sparkcore.jar"' \
          'set every sparkDependencyProvided := true' \
          sparkcore/assembly
        cp ./.targets/sparkcore/scala-2.11/sparkcore.jar $TRAVIS_BUILD_DIR
        export SPARKCORE_JAR_PATH="$TRAVIS_BUILD_DIR/sparkcore.jar"
        ;;
      *) ;;
    esac
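  # SPECIFIC_DELEGATE (and SPARKCORE_JAR_PATH) remain visible to the later
  # script entries because Travis runs all script entries in the same shell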
  # workaround for the fact that travis caching isn't working; when it starts working again, remove this
  - ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION foundation/test:compile
  - travis_wait 40 ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION connector/test:compile
  # then run the tests (note that this re-runs some tests; we can get rid of that once we have polyrepo)
  - |-
    ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION \
      it/sideEffectTestFSConfig \
      "it/testOnly -- xonly failtrace" \
      "$SPECIFIC_DELEGATE"
  - set +e
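# jobs for the non-matrix stages; each one supplies its own script, while the
# matrix jobs generated from CONNECTOR above use the top-level script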
jobs:
  include:
    - stage: clean
      env:
      script:
        - ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} clean
    - stage: compile
      env:
      script:
        - set -e
        - ./sbt preBuild
        - |-
          ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION \
            checkHeaders \
            test:compile
        - ./sbt postBuild
        - set +e
# note that the "test" stage has special significance to Travis (it's the only matrix-able stage)
- stage: unit test
env:
script:
- ./sbt preBuild
- |-
./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION \
it/sideEffectTestFSConfig \
"testOnly -- xonly failtrace" \
"exclusive:testOnly -- xonly failtrace"
    - stage: publish
      env:
      script:
        - set -e
        - ./sbt preBuild
        # workaround for the fact that travis caching isn't working; when it starts working again, remove this
        - ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION foundation/test:compile
        - travis_wait 40 ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION connector/test:compile
        - './sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION doc web/assembly'
        - scripts/testJar
        # release to sonatype
        - scripts/quasarPublishAndTag
        # recreate sparkcore.jar, which is just going to hang out
        # sparkcore currently disabled
        # - |-
        #   ./sbt -DisIsolatedEnv=${ISOLATED_ENV:=false} ++$TRAVIS_SCALA_VERSION \
        #     'set every assemblyJarName in assembly := "sparkcore.jar"' \
        #     'set every sparkDependencyProvided := true' \
        #     sparkcore/assembly
        # release to github
        - scripts/publishJar
        - set +e
notifications:
  irc:
    template:
      - "%{result}: %{repository_slug}#%{build_number} (%{branch}@%{commit}: %{author}) %{build_url}"
  slack:
    secure: k7tat0w0CSokOD1K0nfPhFY9Z3xkYHXboNlW1WgNAjqtq56hQsfQWhN8z6cXRAs/CgT8ME0K//wDN/HgdG91/aVh1smv/hxMa6P/o70GclhvUkB4iTis3kv9la3Kf2w3K5pbWJ6fFLdAZqc5i9XpQ8q+d7UTgwAxj1ZcYwaCSVo=
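# $DISCORD_WEBHOOK_TOKENS below is supplied by the encrypted global env var
# declared in env.global above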
after_success:
  - scripts/discordTravisPost success https://discordapp.com/api/webhooks/$DISCORD_WEBHOOK_TOKENS
after_failure:
  - scripts/discordTravisPost failure https://discordapp.com/api/webhooks/$DISCORD_WEBHOOK_TOKENS
branches:
  only:
    - master
    - next-major
    - /^backport.*$/

cache:
  directories:
    - $HOME/.cache/quasar/fileDownloads
    - $HOME/.coursier/cache
    - $HOME/.ivy2/cache
    - $HOME/.sbt
    - target
    - .targets
    - '.hoarder-cache'
    - project/project/target
    - project/target
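# drop sbt lock files and ivy metadata before the cache is uploaded; they change
# on every run and would otherwise cause a cache re-upload each build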
before_cache:
  - find "$HOME/.sbt/" -name '*.lock' -print0 | xargs -0 rm
  - find "$HOME/.ivy2/" -name 'ivydata-*.properties' -print0 | xargs -0 rm