From 4602241f11b626d40bf67cfe3a13d3c146adf335 Mon Sep 17 00:00:00 2001
From: Jessica Frazelle
Date: Wed, 11 Mar 2015 10:08:51 -0700
Subject: [PATCH] Fix clear cache docs release.

Clear the cache for all files. Don't worry about what changed.

Move the docs release script back to the docker/docker repo, so we all
can easily watch the changes.

Docker-DCO-1.1-Signed-off-by: Jessie Frazelle (github: jfrazelle)

Docker-DCO-1.1-Signed-off-by: Jessie Frazelle (github: jfrazelle)
---
 Makefile        |   1 -
 docs/Dockerfile |   1 +
 docs/release.sh | 168 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 169 insertions(+), 1 deletion(-)
 create mode 100644 docs/release.sh

diff --git a/Makefile b/Makefile
index 1c71e00fad24f..5eb7b8d5cc6e8 100644
--- a/Makefile
+++ b/Makefile
@@ -86,7 +86,6 @@ build: bundles
 	docker build -t "$(DOCKER_IMAGE)" .
 
 docs-build:
-	git fetch https://github.com/docker/docker.git docs && git diff --name-status FETCH_HEAD...HEAD -- docs > docs/changed-files
 	cp ./VERSION docs/VERSION
 	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
 #	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 969c0e814fbff..dd61fa8df02c1 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -21,6 +21,7 @@ COPY ./VERSION VERSION
 # TODO: don't do this - look at merging the yml file in build.sh
 COPY ./mkdocs.yml mkdocs.yml
 COPY ./s3_website.json s3_website.json
+COPY ./release.sh release.sh
 
 # Docker Swarm
 #ADD https://raw.githubusercontent.com/docker/swarm/master/docs/mkdocs.yml /docs/mkdocs-swarm.yml
diff --git a/docs/release.sh b/docs/release.sh
new file mode 100644
index 0000000000000..98938b168f9bf
--- /dev/null
+++ b/docs/release.sh
@@ -0,0 +1,168 @@
+#!/usr/bin/env bash
+set -e
+set -o pipefail
+
+usage() {
+	cat >&2 <<'EOF'
+To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
+(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
+and set the AWS_S3_BUCKET env var to the name of your bucket.
+
+If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`
+
+make AWS_S3_BUCKET=docs-stage.docker.com docs-release
+
+will then push the documentation site to your s3 bucket.
+
+ Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server
+EOF
+	exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+
+VERSION=$(cat VERSION)
+
+if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then
+	if [ "${VERSION%-dev}" != "$VERSION" ]; then
+		echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
+		exit 1
+	fi
+	# Production bucket: let crawlers index the site.
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Allow: /
+EOF
+
+else
+	# Staging buckets: keep search engines away.
+	cat > ./sources/robots.txt <<'EOF'
+User-agent: *
+Disallow: /
+EOF
+fi
+
+# Strip the patch level - 1.0.2-dev -> v1.0
+MAJOR_MINOR="v${VERSION%.*}"
+export MAJOR_MINOR
+
+export BUCKET="$AWS_S3_BUCKET"
+
+# Declare and assign separately so a failure in $(pwd) isn't masked by export.
+AWS_CONFIG_FILE="$(pwd)/awsconfig"
+export AWS_CONFIG_FILE
+[ -e "$AWS_CONFIG_FILE" ] || usage
+export AWS_DEFAULT_PROFILE="$BUCKET"
+
+echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"
+
+# Create the bucket (if needed), verify access, enable website hosting.
+setup_s3() {
+	echo "Create $BUCKET"
+	# Try creating the bucket. Ignore errors (it might already exist).
+	aws s3 mb --profile "$BUCKET" "s3://$BUCKET" 2>/dev/null || true
+	# Check access to the bucket.
+	echo "test $BUCKET exists"
+	aws s3 --profile "$BUCKET" ls "s3://$BUCKET"
+	# Make the bucket accessible through website endpoints.
+	echo "make $BUCKET accessible as a website"
+	#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
+	s3conf=$(envsubst < s3_website.json)
+	echo
+	echo "$s3conf"
+	echo
+	aws s3api --profile "$BUCKET" put-bucket-website --bucket "$BUCKET" --website-configuration "$s3conf"
+}
+
+# Run mkdocs and pre-compress the search index (served with gzip encoding).
+build_current_documentation() {
+	mkdocs build
+	gzip -9 -k -f site/search_content.json
+}
+
+# Upload the built site to s3://$BUCKET$1 ($1 is "" or "/vX.Y/").
+upload_current_documentation() {
+	src=site/
+	dst="s3://$BUCKET${1:-}"
+
+	cache=max-age=3600
+	if [ "$NOCACHE" ]; then
+		cache=no-cache
+	fi
+
+	echo
+	echo "Uploading $src"
+	echo "  to $dst"
+	echo
+
+	# Upload everything in one recursive copy; the old per-extension
+	# include list referenced an undefined variable and never matched
+	# the intended files. $OPTIONS is deliberately unquoted: it is
+	# either empty or "--dryrun".
+	run=(aws s3 cp --recursive $OPTIONS --profile "$BUCKET" --cache-control "$cache" --acl public-read "$src" "$dst")
+	echo "======================="
+	echo "${run[*]}"
+	echo "======================="
+	"${run[@]}"
+
+	# Make sure the search_content.json.gz file has the right content-encoding
+	aws s3 cp $OPTIONS --profile "$BUCKET" --cache-control "$cache" --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst"
+}
+
+# Invalidate the CloudFront cache for every uploaded file.
+# $1 is the path prefix WITHOUT a trailing slash ("" or "/vX.Y").
+invalidate_cache() {
+	if [ -z "$DISTRIBUTION_ID" ]; then
+		echo "Skipping Cloudfront cache invalidation"
+		return
+	fi
+
+	dst=${1:-}
+
+	aws configure set preview.cloudfront true
+
+	# Read the file list into a real array; keeping it in a plain string
+	# made ${#files[@]} always 1 and produced a single bogus path.
+	mapfile -t files < <(find site/ -not -name "*.md*" -type f | sed 's/site\///g')
+
+	len=${#files[@]}
+
+	{
+		echo "aws cloudfront create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '"
+		echo "{\"Paths\":{\"Quantity\":$len,"
+		echo "\"Items\": ["
+		for idx in "${!files[@]}"; do
+			comma=","
+			# No comma after the last entry - keep the JSON valid.
+			[ "$idx" -eq $((len - 1)) ] && comma=""
+			# CloudFront paths must start with "/", so join with "$dst/".
+			echo "\"$dst/${files[$idx]}\"$comma"
+		done
+		echo "]}, \"CallerReference\":"
+		echo "\"$(date)\"}'"
+	} > batchfile
+
+	echo "-----"
+	cat batchfile
+	echo "-----"
+	sh batchfile
+	echo "-----"
+}
+
+if [ "$OPTIONS" != "--dryrun" ]; then
+	setup_s3
+fi
+
+# Default to only building the version-specific docs so we don't clobber the latest by accident with old versions
+if [ "$BUILD_ROOT" == "yes" ]; then
+	echo "Building root documentation"
+	build_current_documentation
+	upload_current_documentation
+	[ "$NOCACHE" ] || invalidate_cache
+fi
+
+# Build again with the /vX.Y/ prefix
+sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
+echo "Building the /$MAJOR_MINOR/ documentation"
+build_current_documentation
+upload_current_documentation "/$MAJOR_MINOR/"
+[ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR"