Skip to content

Commit

Permalink
PR to fix leo automation test failures (#861)
Browse files Browse the repository at this point in the history
  • Loading branch information
vkumra-broad authored Apr 12, 2019
1 parent 80357cf commit 8f374b4
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 11 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -161,12 +161,16 @@ class NotebookClusterMonitoringSpec extends FreeSpec with NotebookTestUtils with
status shouldBe ClusterStatus.Updating
}

eventually(timeout(Span(300, Seconds)), interval(Span(30, Seconds))) {
val clusterResponse = Leonardo.cluster.get(project, cluster.clusterName)
clusterResponse.machineConfig.numberOfWorkers shouldBe Some(3)
clusterResponse.status shouldBe ClusterStatus.Running
val timeToAddWorker = time{
eventually(timeout(Span(420, Seconds)), interval(Span(30, Seconds))) {
val clusterResponse = Leonardo.cluster.get(project, cluster.clusterName)
clusterResponse.machineConfig.numberOfWorkers shouldBe Some(3)
clusterResponse.status shouldBe ClusterStatus.Running
}
}

logger.info(s"Adding worker to ${cluster.projectNameString} took ${timeToAddWorker.duration.toSeconds} seconds")

//now that we have confirmed that we can add a worker node, let's see what happens when we size it back down to 2 workers
Leonardo.cluster.update(project, cluster.clusterName, ClusterRequest(machineConfig = Option(twoWorkersMachineConfig)))

Expand All @@ -175,11 +179,15 @@ class NotebookClusterMonitoringSpec extends FreeSpec with NotebookTestUtils with
status shouldBe ClusterStatus.Updating
}

eventually(timeout(Span(300, Seconds)), interval(Span(30, Seconds))) {
val clusterResponse = Leonardo.cluster.get(project, cluster.clusterName)
clusterResponse.machineConfig.numberOfWorkers shouldBe Some(2)
clusterResponse.status shouldBe ClusterStatus.Running
val timeToRemoveWorker = time {
eventually(timeout(Span(420, Seconds)), interval(Span(30, Seconds))) {
val clusterResponse = Leonardo.cluster.get(project, cluster.clusterName)
clusterResponse.machineConfig.numberOfWorkers shouldBe Some(2)
clusterResponse.status shouldBe ClusterStatus.Running
}
}

logger.info(s"Removing worker from ${cluster.projectNameString} took ${timeToRemoveWorker.duration.toSeconds} seconds")
}
}
}
Expand Down
6 changes: 4 additions & 2 deletions src/main/resources/jupyter/init-actions.sh
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,8 @@ if [[ "${ROLE}" == 'Master' ]]; then
COMPOSE_FILES+=(-f /etc/`basename ${RSTUDIO_DOCKER_COMPOSE}`)
fi

cat "${COMPOSE_FILES[@]}"

retry 5 docker-compose "${COMPOSE_FILES[@]}" config
retry 5 docker-compose "${COMPOSE_FILES[@]}" pull
retry 5 docker-compose "${COMPOSE_FILES[@]}" up -d
Expand Down Expand Up @@ -302,8 +304,8 @@ if [[ "${ROLE}" == 'Master' ]]; then
retry 3 docker exec -u root -e PIP_USER=false ${JUPYTER_SERVER_NAME} ${JUPYTER_HOME}/${JUPYTER_USER_SCRIPT}
fi

docker exec -u root ${JUPYTER_SERVER_NAME} chown -R jupyter-user:users ${JUPYTER_HOME}
docker exec -u root ${JUPYTER_SERVER_NAME} chown -R jupyter-user:users /usr/local/share/jupyter/lab
retry 5 docker exec -u root ${JUPYTER_SERVER_NAME} chown -R jupyter-user:users ${JUPYTER_HOME}
retry 5 docker exec -u root ${JUPYTER_SERVER_NAME} chown -R jupyter-user:users /usr/local/share/jupyter/lab

#Install lab extensions
#Note: lab extensions need to installed as jupyter user, not root
Expand Down
2 changes: 1 addition & 1 deletion src/main/resources/jupyter/proxy-docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ services:
restart: always
environment:
HTTPD_PORT: '80'
SSL_HTTPD_PORT: '443'
SSL_HTTPD_PORT: '443'

0 comments on commit 8f374b4

Please sign in to comment.