Skip to content

Commit

Permalink
ci: re-run flaky docs examples e2e tests in `test_docs_examples[_ivy]` jobs (angular#32497)
Browse files Browse the repository at this point in the history

The docs examples e2e tests have been quite flaky recently. This causes
the `test_docs_examples` and `test_docs_examples_ivy` CircleCI jobs to
often fail (and block PRs) without a real reason.

This commit adds support for re-running failed docs examples e2e tests
and configures the `test_docs_examples` and `test_docs_examples_ivy`
jobs to try running each test that fails a second time, before giving up
and marking it as failed.

Closes angular#31841
Closes angular#31842

PR Close angular#32497
  • Loading branch information
gkalpak authored and matsko committed Sep 5, 2019
1 parent 62d92f8 commit 497d6b1
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 17 deletions.
4 changes: 2 additions & 2 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -418,7 +418,7 @@ jobs:
# Run examples tests. The "CIRCLE_NODE_INDEX" will be set if "parallelism" is enabled.
# Since the parallelism is set to "3", there will be three parallel CircleCI containers
# with either "0", "1" or "2" as node index. This can be passed to the "--shard" argument.
- run: yarn --cwd aio example-e2e --setup --local --cliSpecsConcurrency=5 --shard=${CIRCLE_NODE_INDEX}/${CIRCLE_NODE_TOTAL}
- run: yarn --cwd aio example-e2e --setup --local --cliSpecsConcurrency=5 --shard=${CIRCLE_NODE_INDEX}/${CIRCLE_NODE_TOTAL} --retry 2

test_docs_examples_ivy:
<<: *job_defaults
Expand All @@ -444,7 +444,7 @@ jobs:
# Run examples tests with ivy. The "CIRCLE_NODE_INDEX" will be set if "parallelism" is enabled.
# Since the parallelism is set to "3", there will be three parallel CircleCI containers
# with either "0", "1" or "2" as node index. This can be passed to the "--shard" argument.
- run: yarn --cwd aio example-e2e --setup --local --ivy --cliSpecsConcurrency=5 --shard=${CIRCLE_NODE_INDEX}/${CIRCLE_NODE_TOTAL}
- run: yarn --cwd aio example-e2e --setup --local --ivy --cliSpecsConcurrency=5 --shard=${CIRCLE_NODE_INDEX}/${CIRCLE_NODE_TOTAL} --retry 2

# This job should only be run on PR builds, where `CI_PULL_REQUEST` is not `false`.
aio_preview:
Expand Down
49 changes: 34 additions & 15 deletions aio/tools/examples/run-example-e2e.js
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,10 @@ if (argv.ivy) {
* Run Protractor End-to-End Tests for Doc Samples
*
* Flags
* --filter to filter/select _example app subdir names
* --filter to filter/select _example app subdir names
* e.g. --filter=foo // all example apps with 'foo' in their folder names.
*
* --setup run yarn install, copy boilerplate and update webdriver
* --setup to run yarn install, copy boilerplate and update webdriver
* e.g. --setup
*
* --local to use the locally built Angular packages, rather than versions from npm
Expand All @@ -55,6 +55,9 @@ if (argv.ivy) {
*
* --cliSpecsConcurrency Amount of CLI example specs that should be executed concurrently.
* By default runs specs sequentially.
*
* --retry to retry failed tests (useful for overcoming flakes)
* e.g. --retry 3 // To try each test up to 3 times.
*/
function runE2e() {
if (argv.setup) {
Expand All @@ -70,7 +73,7 @@ function runE2e() {

return Promise.resolve()
.then(() => findAndRunE2eTests(argv.filter, outputFile, argv.shard,
argv.cliSpecsConcurrency || DEFAULT_CLI_SPECS_CONCURRENCY))
argv.cliSpecsConcurrency || DEFAULT_CLI_SPECS_CONCURRENCY, argv.retry || 1))
.then((status) => {
reportStatus(status, outputFile);
if (status.failed.length > 0) {
Expand All @@ -85,7 +88,7 @@ function runE2e() {

// Finds all of the *e2e-spec.tests under the examples folder along with the corresponding apps
// that they should run under. Then run each app/spec collection sequentially.
function findAndRunE2eTests(filter, outputFile, shard, cliSpecsConcurrency) {
function findAndRunE2eTests(filter, outputFile, shard, cliSpecsConcurrency, maxAttempts) {
const shardParts = shard ? shard.split('/') : [0, 1];
const shardModulo = parseInt(shardParts[0], 10);
const shardDivider = parseInt(shardParts[1], 10);
Expand All @@ -97,9 +100,22 @@ function findAndRunE2eTests(filter, outputFile, shard, cliSpecsConcurrency) {
fs.writeFileSync(outputFile, header);

const status = {passed: [], failed: []};
const updateStatus = (specPath, passed) => {
const updateStatus = (specDescription, passed) => {
const arr = passed ? status.passed : status.failed;
arr.push(specPath);
arr.push(specDescription);
};
const runTest = async (specPath, testFn) => {
let attempts = 0;
let passed = false;

while (true) {
attempts++;
passed = await testFn();

if (passed || (attempts >= maxAttempts)) break;
}

updateStatus(`${specPath} (attempts: ${attempts})`, passed);
};

return getE2eSpecs(EXAMPLES_PATH, filter)
Expand All @@ -117,12 +133,13 @@ function findAndRunE2eTests(filter, outputFile, shard, cliSpecsConcurrency) {

return e2eSpecPaths.systemjs
.reduce(
(promise, specPath) => {
return promise.then(() => {
const examplePath = path.dirname(specPath);
return runE2eTestsSystemJS(examplePath, outputFile)
.then(passed => updateStatus(examplePath, passed));
});
async (prevPromise, specPath) => {
await prevPromise;

const examplePath = path.dirname(specPath);
const testFn = () => runE2eTestsSystemJS(examplePath, outputFile);

await runTest(examplePath, testFn);
},
Promise.resolve())
.then(async () => {
Expand All @@ -138,9 +155,11 @@ function findAndRunE2eTests(filter, outputFile, shard, cliSpecsConcurrency) {
const bufferOutput = cliSpecsConcurrency > 1;
while (specQueue.length) {
const chunk = specQueue.splice(0, cliSpecsConcurrency);
await Promise.all(chunk.map((testDir, index) => {
return runE2eTestsCLI(testDir, outputFile, bufferOutput, ports.pop())
.then(passed => updateStatus(testDir, passed));
await Promise.all(chunk.map(testDir => {
const port = ports.pop();
const testFn = () => runE2eTestsCLI(testDir, outputFile, bufferOutput, port);

return runTest(testDir, testFn);
}));
}
});
Expand Down

0 comments on commit 497d6b1

Please sign in to comment.