diff --git a/.gitignore b/.gitignore index c2eaf90e..4c32d0b3 100644 --- a/.gitignore +++ b/.gitignore @@ -35,4 +35,5 @@ tiler-server/imposm/ overpass-api-db/ data/*/ !data/README.md -images/changeset-replication-job/config.yaml \ No newline at end of file +images/changeset-replication-job/config.yaml +values_copy.yaml \ No newline at end of file diff --git a/chartpress.yaml b/chartpress.yaml index 79aeab27..e6779faf 100644 --- a/chartpress.yaml +++ b/chartpress.yaml @@ -1,46 +1,52 @@ charts: - - name: osm-seed - imagePrefix: developmentseed/osmseed- - repo: - git: developmentseed/osm-seed-chart - published: https://devseed.com/osm-seed-chart +- name: osm-seed + imagePrefix: developmentseed/osmseed- + repo: + git: developmentseed/osm-seed-chart + published: https://devseed.com/osm-seed-chart - images: - web: - valuesPath: web.image - db: - valuesPath: db.image - backup-restore: - valuesPath: dbBackupRestore.image - planet-dump: - valuesPath: planetDump.image - full-history: - valuesPath: fullHistory.image - replication-job: - valuesPath: replicationJob.image - populate-apidb: - valuesPath: populateApidb.image - osm-processor: - valuesPath: osmProcessor.image - tiler-db: - valuesPath: tilerDb.image - tiler-imposm: - valuesPath: tilerImposm.image - tiler-server: - valuesPath: tilerServer.image - tasking-manager-api: - valuesPath: tmApi.image - # tiler-visor: - # valuesPath: tilerVisor.image - nominatim: - valuesPath: nominatimApi.image - overpass-api: - valuesPath: overpassApi.image - taginfo: - valuesPath: taginfo.image - osm-simple-metrics: - valuesPath: osmSimpleMetrics.image - changeset-replication-job: - valuesPath: changesetReplicationJob.image - cgimap: - valuesPath: cgimap.image \ No newline at end of file + images: + web: + valuesPath: web.image + db: + valuesPath: db.image + backup-restore: + valuesPath: dbBackupRestore.image + planet-dump: + valuesPath: planetDump.image + full-history: + valuesPath: fullHistory.image + replication-job: + valuesPath: 
replicationJob.image + populate-apidb: + valuesPath: populateApidb.image + osm-processor: + valuesPath: osmProcessor.image + tiler-db: + valuesPath: tilerDb.image + tiler-imposm: + valuesPath: tilerImposm.image + tiler-server: + valuesPath: tilerServer.image + tasking-manager-api: + valuesPath: tmApi.image + # tiler-visor: + # valuesPath: tilerVisor.image + nominatim: + valuesPath: nominatimApi.image + overpass-api: + valuesPath: overpassApi.image + taginfo: + valuesPath: taginfo.image + osm-simple-metrics: + valuesPath: osmSimpleMetrics.image + changeset-replication-job: + valuesPath: changesetReplicationJob.image + osmcha-web: + valuesPath: osmchaWeb.image + osmcha-db: + valuesPath: osmchaDb.image + planet-files: + valuesPath: planetFiles.image + cgimap: + valuesPath: cgimap.image diff --git a/compose/db-backup-restore.yml b/compose/db-backup-restore.yml index 7f98764e..982e94ad 100644 --- a/compose/db-backup-restore.yml +++ b/compose/db-backup-restore.yml @@ -9,13 +9,10 @@ services: context: ../images/backup-restore dockerfile: Dockerfile volumes: - - ../data/db-backup-restore-data:/mnt/data - command: > - /bin/bash -c " - echo 'Set cron job for backing up DB, every 4 minutes'; - while :; do echo 'Creating DB backup...'; /start.sh; sleep 4m; done; - " + - ../data/db-backup-restore-data:/mnt/data + command: /start.sh env_file: - - ../envs/.env.db - - ../envs/.env.db-utils - - ../envs/.env.cloudprovider + - ../envs/.env.db + - ../envs/.env.osmcha + - ../envs/.env.db-utils + - ../envs/.env.cloudprovider diff --git a/compose/full-history.yml b/compose/full-history.yml deleted file mode 100644 index f5bf586a..00000000 --- a/compose/full-history.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: '3' -services: - ##################################################### - ## OSM full planet replication - ##################################################### - full-history: - image: osmseed-full-history:v1 - build: - context: ../images/full-history - dockerfile: Dockerfile - 
volumes: - - ../data/full-history-data:/mnt/data - command: > - /bin/bash -c " - echo 'Set cron job for full history PBF file, every 4 minutes'; - while :; do echo 'Creating full history PBF file...'; /start.sh; sleep 4m; done; - " - env_file: - - ../envs/.env.db - - ../envs/.env.db-utils - - ../envs/.env.cloudprovider - \ No newline at end of file diff --git a/compose/nominatim.yml b/compose/nominatim.yml index cb0a1c07..ff3349d2 100644 --- a/compose/nominatim.yml +++ b/compose/nominatim.yml @@ -10,9 +10,10 @@ services: dockerfile: Dockerfile ports: - '8080:8080' - - '5432:5432' + # - '5432:5432' volumes: - ../data/nominatim-data:/var/lib/postgresql/14/main # - ../../Nominatim/nominatim/tokenizer/sanitizers:/usr/local/lib/nominatim/lib-python/nominatim/tokenizer/sanitizers env_file: - ../envs/.env.nominatim + restart: always diff --git a/compose/osmcha.yml b/compose/osmcha.yml new file mode 100644 index 00000000..0761809d --- /dev/null +++ b/compose/osmcha.yml @@ -0,0 +1,60 @@ +version: "3.8" +services: + web: + platform: linux/amd64 + image: developmentseed/osmseed-osmcha-web:v16 + build: + context: ../images/osmcha-web + dockerfile: Dockerfile + env_file: + - ../envs/.env.osmcha + volumes: + - ../data/osmcha/staticfiles:/staticfiles + db: + platform: linux/amd64 + image: osmseed-osmcha-db:v1 + build: + context: ../images/osmcha-db + dockerfile: Dockerfile + ports: + - "5432:5432" + volumes: + - ../data/osmcha-db-data:/var/lib/postgresql/data + env_file: + - ../envs/.env.osmcha + init: + platform: linux/amd64 + image: ghcr.io/willemarcel/osmcha-django:b1f4e6afc90e08707cadc4d74580632ca3b93dd2 + command: > + /bin/bash -c " + set -x + python manage.py collectstatic + python manage.py migrate + mkdir -p /staticfiles/static + cp -r /app/staticfiles/* /staticfiles/static/ + " + env_file: + - ../envs/.env.osmcha + volumes: + - ../data/osmcha/staticfiles:/staticfiles + api: + platform: linux/amd64 + image: 
ghcr.io/willemarcel/osmcha-django:b1f4e6afc90e08707cadc4d74580632ca3b93dd2 + build: + context: ../images/osmcha-api + dockerfile: Dockerfile + command: > + /bin/bash -c " + set -x + python manage.py collectstatic + python manage.py migrate + mkdir -p /staticfiles/static + cp -r /app/staticfiles/* /staticfiles/static/ + gunicorn --workers 4 --bind 0.0.0.0:5000 --log-file - --access-logfile - config.wsgi + " + ports: + - "5000:5000" + env_file: + - ../envs/.env.osmcha + volumes: + - ../data/osmcha/staticfiles:/staticfiles diff --git a/compose/planet-dump.yml b/compose/planet-dump.yml deleted file mode 100644 index b265a768..00000000 --- a/compose/planet-dump.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: '3' -services: - ###################################################### - ### OSM processor and also use this image as base for other containers - ###################################################### - osm-processor: - image: osmseed-osm-processor:v1 - build: - context: ../images/osm-processor - dockerfile: Dockerfile - ###################################################### - ### Planet replication section - ###################################################### - planet-dump: - image: osmseed-planet-dump:v1 - build: - context: ./images/planet-dump - dockerfile: Dockerfile - volumes: - - ../data/planet-dump-data:/mnt/data - command: > - /bin/bash -c " - echo 'Set cronjob for planet-dump, every 4 minutes'; - while :; do echo 'Creating the planet dump file...'; /start.sh; sleep 4m; done; - " - env_file: - - ../envs/.env.db - - ../envs/.env.db-utils - - ../envs/.env.cloudprovider - diff --git a/compose/planet.yml b/compose/planet.yml new file mode 100644 index 00000000..c16f9990 --- /dev/null +++ b/compose/planet.yml @@ -0,0 +1,97 @@ +version: '3' +services: + ###################################################### + ### OSM minute replication files section + ###################################################### + replication-job: + image: osmseed-replication-job:v1 + 
build: + context: ../images/replication-job + dockerfile: Dockerfile + volumes: + - ../data/replication-job-data:/mnt/data + # - ./../images/replication-job:/app + command: /start.sh + env_file: + - ../envs/.env.db + - ../envs/.env.db-utils + - ../envs/.env.cloudprovider + restart: always + + ###################################################### + ### Planet replication section + ###################################################### + planet-dump: + image: osmseed-planet-dump:v1 + build: + context: ../images/planet-dump + dockerfile: Dockerfile + volumes: + - ../data/planet-dump-data:/mnt/data + command: /start.sh + env_file: + - ../envs/.env.db + - ../envs/.env.db-utils + - ../envs/.env.cloudprovider + restart: always + + ##################################################### + ## OSM full planet replication + ##################################################### + full-history: + image: osmseed-full-history:v1 + build: + context: ../images/full-history + dockerfile: Dockerfile + volumes: + - ../data/full-history-data:/mnt/data + command: /start.sh + env_file: + - ../envs/.env.db + - ../envs/.env.db-utils + - ../envs/.env.cloudprovider + restart: always + + ##################################################### + ## Changeset replications + ##################################################### + changeset-replication-job: + image: osmseed-changeset-replication-job:v1 + build: + context: ../images/changeset-replication-job + dockerfile: Dockerfile + volumes: + - ../data/changeset-replication-job-data:/mnt/changesets + command: > + /bin/bash -c "./start.sh" + env_file: + - ../envs/.env.db + - ../envs/.env.db-utils + - ../envs/.env.cloudprovider + restart: always + + ###################################################### + ### NGINX container for serving files + ###################################################### + planet-files: + image: osmseed-planet-files:v1 + build: + context: ../images/planet-files + dockerfile: Dockerfile + ports: + - "8081:80" + - 
"8082:3000" + volumes: + # Serve minute replication files + - ../data/replication-job-data:/usr/share/nginx/html/server/static-files/replication/minute + # Serve planet dump files + - ../data/planet-dump-data:/usr/share/nginx/html/server/static-files/planet + # Serve full history planet file + - '../data/full-history-data:/usr/share/nginx/html/server/static-files/full-planet' + - '../data/changeset-replication-job-data:/usr/share/nginx/html/server/static-files/changesets' + # Development mode + # - ./../images/planet-files/:/usr/share/nginx/html/ + restart: always +networks: + osm-seed: + driver: bridge \ No newline at end of file diff --git a/compose/replication.yml b/compose/replication.yml deleted file mode 100644 index 6cdba218..00000000 --- a/compose/replication.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: '3' -services: - ###################################################### - ### OSM minute replication files section - ###################################################### - replication-job: - image: osmseed-replication-job:v1 - build: - context: ../images/replication-job - dockerfile: Dockerfile - volumes: - - ../data/replication-job-data:/mnt/data - command: > - /bin/bash -c " echo Sleep the replication-job for 1 minute; sleep 1m; echo Creating the replication files!; /start.sh " - env_file: - - ../envs/.env.db - - ../envs/.env.db-utils - - ../envs/.env.cloudprovider - changeset-replication-job: - image: osmseed-changeset-replication-job:v1 - build: - context: ../images/changeset-replication-job - dockerfile: Dockerfile - volumes: - - ../data/changeset-replication-job-data:/mnt/changesets - # - ./../images/changeset-replication-job:/openstreetmap-changeset-replication - command: > - /bin/bash -c "./start.sh" - env_file: - - ../envs/.env.db - - ../envs/.env.db-utils - - ../envs/.env.cloudprovider diff --git a/compose/taginfo.yml b/compose/taginfo.yml index 6e4256ac..a3acc668 100644 --- a/compose/taginfo.yml +++ b/compose/taginfo.yml @@ -9,11 +9,11 @@ 
services: context: ../images/taginfo dockerfile: Dockerfile ports: - - '4567:80' + - '8000:80' volumes: - - ../data/taginfo-data:/apps/data/ + - ../data/taginfo-data:/usr/src/app/data command: > - /bin/bash -c " - ./start.sh" + /bin/bash -c "/usr/src/app/start.sh web" + env_file: - - ../envs/.env.taginfo \ No newline at end of file + - ../envs/.env.taginfo diff --git a/compose/tasking-manager.yml b/compose/tasking-manager.yml index 178e87cb..d61b52e0 100644 --- a/compose/tasking-manager.yml +++ b/compose/tasking-manager.yml @@ -1,16 +1,33 @@ version: '3' services: - ###################################################### - ### Tasking Manager section - ###################################################### - tasking-manager-api: + tmdb: + image: mdillon/postgis:11 + # ports: + # - '5432:5432' + volumes: + - ../data/tasking-manager-db:/db + env_file: + - ../envs/.env.tasking-manager-db + restart: always + migration: image: osmseed-tasking-manager-api:v1 build: context: ../images/tasking-manager-api dockerfile: Dockerfile + restart: on-failure + command: > + sh -c "sleep 10s && python3 manage.py db upgrade" + env_file: + - ../envs/.env.tasking-manager + - ../envs/.env.tasking-manager-db + tmapi: + build: + context: ../images/tasking-manager-api + dockerfile: Dockerfile + image: osmseed-tasking-manager-api:v1 ports: - - '5000:5000' - volumes: - - ../data/tasking-manager-api-db:/db + - '5000:5000' env_file: - - ../envs/.env.tasking-manager \ No newline at end of file + - ../envs/.env.tasking-manager + - ../envs/.env.tasking-manager-db + restart: always diff --git a/compose/tiler.yml b/compose/tiler.yml index a880d3d8..11d15502 100644 --- a/compose/tiler.yml +++ b/compose/tiler.yml @@ -1,4 +1,4 @@ -version: '3' +version: '3.8' services: # ###################################################### # ### Tiler DB @@ -8,12 +8,13 @@ services: build: context: ../images/tiler-db dockerfile: Dockerfile - ports: - - "5433:5432" + # ports: + # - "5433:5432" volumes: - - 
../data/tiler-db-data:/var/lib/postgresql/data + - ../data/tiler-db-data:/var/lib/postgresql/data env_file: - - ../envs/.env.tiler-db + - ../envs/.env.tiler-db + restart: always # ###################################################### # ### Tiler imposm for importing data # ###################################################### @@ -23,20 +24,25 @@ services: context: ../images/tiler-imposm dockerfile: Dockerfile volumes: - - ../data/tiler-imposm-data:/mnt/data + - ../data/tiler-imposm-data:/mnt/data + # - ../images/tiler-imposm:/osm depends_on: - - tiler-db - command: > - /bin/bash -c " - echo starting imposm!; - sleep 1m; - ./start.sh; - " + - tiler-db + command: + - sh + - -c + - "sleep 60 && ./start.sh" env_file: - - ../envs/.env.tiler-db - - ../envs/.env.tiler-imposm + - ../envs/.env.tiler-db + - ../envs/.env.tiler-imposm + healthcheck: + test: ["CMD", "./liveness.sh"] + interval: 30s + timeout: 10s + retries: 3 + restart: always # ###################################################### - # ### Tiler imposm for importing data + # ### Tiler server # ###################################################### tiler-server: image: osmseed-tiler-server:v1 @@ -44,18 +50,19 @@ services: context: ../images/tiler-server dockerfile: Dockerfile volumes: - - ../data/tiler-server-data:/mnt/data + - ../data/tiler-server-data:/mnt/data + - ../data/tiler-imposm-data:/mnt/data/imposm depends_on: - - tiler-db - - tiler-imposm + - tiler-db + - tiler-imposm ports: - - "9090:9090" - command: > - /bin/bash -c " - echo Starting tiles server!; - sleep 2m; - ./start.sh; - " + - "9090:9090" + command: + - sh + - -c + - "./start.sh & ./expire-watcher.sh" env_file: - - ../envs/.env.tiler-db - - ../envs/.env.tiler-server \ No newline at end of file + - ../envs/.env.tiler-db + - ../envs/.env.tiler-server + restart: always + \ No newline at end of file diff --git a/compose/web.yml b/compose/web.yml index 1b90b4b1..cc6bbf10 100644 --- a/compose/web.yml +++ b/compose/web.yml @@ -9,21 +9,28 @@ 
services: build: context: ../images/db dockerfile: Dockerfile - ports: - - '5432:5432' + # ports: + # - '5432:5432' volumes: - - ../data/db-data:/var/lib/postgresql/data + - ../data/db-data-pg14:/var/lib/postgresql/data env_file: - - ../envs/.env.db + - ../envs/.env.db + restart: always web: image: osmseed-web:v1 build: context: ../images/web dockerfile: Dockerfile ports: - - '80:80' + - '80:80' env_file: - - ../envs/.env.web - - ../envs/.env.db + - ../envs/.env.web + - ../envs/.env.db depends_on: - - db + - db + restart: always + memcache: + image: memcached:latest + ports: + - '11211:11211' + restart: always diff --git a/envs/.env.osmcha.example b/envs/.env.osmcha.example new file mode 100644 index 00000000..7c3d265c --- /dev/null +++ b/envs/.env.osmcha.example @@ -0,0 +1,24 @@ +POSTGRES_DB=osmcha +POSTGRES_USER=postgres +POSTGRES_PASSWORD=1234 +PGHOST=osmcha-db +DJANGO_DEBUG=true +DJANGO_SECRET_KEY=abc +OAUTH_OSM_KEY=xya +OAUTH_OSM_SECRET=xya +DJANGO_ANON_USER_THROTTLE_RATE=30/min +DJANGO_COMMON_USER_THROTTLE_RATE=180/min +DJANGO_NON_STAFF_USER_THROTTLE_RATE=3/min +OSMCHA_URL=public-url +OAUTH_REDIRECT_URI=$OSMCHA_URL/oauth-landing.html +DJANGO_ENABLE_CHANGESET_COMMENTS=False +DJANGO_OSM_COMMENTS_API_KEY="" +DJANGO_ROOT=/app +DJANGO_SETTINGS_MODULE=config.settings.local +C_FORCE_ROOT=True +REACT_APP_OSM_URL=https://www.openhistoricalmap.org +REACT_APP_OSM_API=https://www.openhistoricalmap.org/api/0.6 +REACT_APP_OVERPASS_BASE=https://overpass-api.openhistoricalmap.org/api/interpreter +REACT_APP_NOMINATIM_URL=https://www.openhistoricalmap.org/geocoder/search_osm_nominatim +REACT_APP_PRODUCTION_API_URL=$OSMCHA_URL/api/v1 +REACT_APP_MAPBOX_ACCESS_TOKEN='' diff --git a/envs/.env.tasking-manager-db.example b/envs/.env.tasking-manager-db.example new file mode 100644 index 00000000..9176a7d1 --- /dev/null +++ b/envs/.env.tasking-manager-db.example @@ -0,0 +1,4 @@ +POSTGRES_DB=tm +POSTGRES_USER=postgres +POSTGRES_PASSWORD=1234 +POSTGRES_PORT=5432 \ No newline at end of 
file diff --git a/envs/.env.tasking-manager.example b/envs/.env.tasking-manager.example index 902a262c..5887b66e 100644 --- a/envs/.env.tasking-manager.example +++ b/envs/.env.tasking-manager.example @@ -37,20 +37,15 @@ POTLATCH2_EDITOR_URL=https://www.openstreetmap.org/edit?editor=potlatch2 # TM_SECRET=xyz -# OpenStreetMap OAuth consumer key and secret (required) -# +# OpenStreetMap OAuth 1 consumer key and secret (required) + TM_CONSUMER_KEY=foo TM_CONSUMER_SECRET=xyz TM_DEFAULT_CHANGESET_COMMENT="#{osmseed}-project" # The connection to the postgres database (required) -# -POSTGRES_DB=tm -POSTGRES_USER=postgres -POSTGRES_PASSWORD=tm -POSTGRES_ENDPOINT=localhost -POSTGRES_PORT=5432 +POSTGRES_HOST=tmdb # The address to use as the sender on auto generated emails (optional) # diff --git a/envs/.env.web.example b/envs/.env.web.example index e20a31d2..81e62ab7 100644 --- a/envs/.env.web.example +++ b/envs/.env.web.example @@ -20,15 +20,27 @@ MAILER_USERNAME=test@gmail.com MAILER_PASSWORD=abc... MAILER_FROM=no-reply@osmseed.org MAILER_PORT=25 +OSM_memcache_servers="" # Nominatim settings NOMINATIM_URL=nominatim-api -# TODO: Describe those vars -OSM_id_key="abcd..." -OSM_memcache_servers="" +# Make sure that the first time this (OPENSTREETMAP_id_key) value is empty. +# Once you start you server and create your OAuth 2. +### Create OAuth 2 applications +### Client ID=OPENSTREETMAP_id_key +OPENSTREETMAP_id_key="xyz..." + +### OpenStreetMap Web Site +# Client ID=OAUTH_CLIENT_ID +# Client Secret=OAUTH_KEY +OAUTH_CLIENT_ID="abc..." +OAUTH_KEY="efg..." # NEW_RELIC settings # TODO: Needs to be implemented the New Relic functions in order to make running in the web container NEW_RELIC_LICENSE_KEY="..." NEW_RELIC_APP_NAME="..." 
+ +### Set organization name, by default : OpenStreetMap +ORGANIZATION_NAME=OSMSeed diff --git a/images/backup-restore/Dockerfile b/images/backup-restore/Dockerfile index b4356155..95996a98 100644 --- a/images/backup-restore/Dockerfile +++ b/images/backup-restore/Dockerfile @@ -1,19 +1,23 @@ -FROM python:3.9.9 -RUN apt-get update -RUN apt-get install -y \ - curl \ - postgresql-client +FROM python:3.9.9-slim -# Install AWS CLI -RUN pip install awscli +# Install Postgres client, GCP CLI, and Azure CLI +RUN apt-get update \ + && apt-get install -y curl apt-transport-https lsb-release gnupg \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ bullseye-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && curl -L https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && curl -sL https://aka.ms/InstallAzureCLIDeb | bash \ + && apt-get update \ + && apt-get install -y postgresql-client-14 wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* # Install GCP CLI -RUN curl -sSL https://sdk.cloud.google.com | bash -RUN ln -f -s /root/google-cloud-sdk/bin/gsutil /usr/bin/gsutil +RUN curl -sSL https://sdk.cloud.google.com | bash \ + && ln -f -s /root/google-cloud-sdk/bin/gsutil /usr/bin/gsutil +RUN rm -rf /root/google-cloud-sdk/.install/.backup -# Install Azure CLI -RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash +# Install AWS CLI +RUN pip install awscli -VOLUME /mnt/data COPY ./start.sh / -CMD /start.sh +CMD ["/start.sh"] diff --git a/images/backup-restore/start.sh b/images/backup-restore/start.sh index 2f21a0ad..e9fa1240 100755 --- a/images/backup-restore/start.sh +++ b/images/backup-restore/start.sh @@ -1,65 +1,111 @@ #!/usr/bin/env bash +set -e export PGPASSWORD=$POSTGRES_PASSWORD -export VOLUME_DIR=/mnt/data +# Upload files +cloudStorageOps() { + local LOCAL_STATE_FILE=state.txt + local filepath=$1 + local cloudpath=$2 -date=$(date '+%y%m%d_%H%M') -local_backupFile=$VOLUME_DIR/osmseed-db-${date}.sql.gz 
-cloud_backupFile=database/osmseed-db-${date}.sql.gz -stateFile=$VOLUME_DIR/state.txt -restoreFile=$VOLUME_DIR/backup.sql.gz - -echo "Start...$DB_ACTION action" -# Backing up DataBase -if [ "$DB_ACTION" == "backup" ]; then - # Backup database and make maximum compression at the slowest speed - pg_dump -h $POSTGRES_HOST -U $POSTGRES_USER $POSTGRES_DB | gzip -9 >$local_backupFile - - # AWS - if [ "$CLOUDPROVIDER" == "aws" ]; then - echo "$AWS_S3_BUCKET/$cloud_backupFile" > $stateFile - # Upload db backup file - aws s3 cp $local_backupFile $AWS_S3_BUCKET/$cloud_backupFile - # Upload state.txt file - aws s3 cp $stateFile $AWS_S3_BUCKET/database/state.txt - fi - - # GCP - if [ "$CLOUDPROVIDER" == "gcp" ]; then - echo "$GCP_STORAGE_BUCKET/$cloud_backupFile" > $stateFile - # Upload db backup file - gsutil cp $local_backupFile $GCP_STORAGE_BUCKET/$cloud_backupFile - # Upload state.txt file - gsutil cp $stateFile $GCP_STORAGE_BUCKET/database/state.txt - fi - - # Azure - if [ "$CLOUDPROVIDER" == "azure" ]; then - # Save the path file - echo "blob://$AZURE_STORAGE_ACCOUNT/$AZURE_CONTAINER_NAME/$cloud_backupFile" > $stateFile - # Upload db backup file + case "${CLOUDPROVIDER}" in + aws) + aws s3 cp ${filepath} s3://${AWS_S3_BUCKET}/${cloudpath} + echo s3://${AWS_S3_BUCKET}/${cloudpath} >${LOCAL_STATE_FILE} + aws s3 cp ${LOCAL_STATE_FILE} s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/state.txt + ;; + gcp) + gsutil cp ${filepath} gs://${GCP_STORAGE_BUCKET}/${cloudpath} + echo "gs://${GCP_STORAGE_BUCKET}/${CLOUD_BACKUP_FILE}" >${LOCAL_STATE_FILE} + gsutil cp ${LOCAL_STATE_FILE} gs://${GCP_STORAGE_BUCKET}/${BACKUP_CLOUD_FOLDER}/state.txt + ;; + azure) az storage blob upload \ - --container-name $AZURE_CONTAINER_NAME \ - --file $local_backupFile \ - --name $cloud_backupFile \ + --container-name ${AZURE_CONTAINER_NAME} \ + --file ${filepath} \ + --name ${cloudpath} \ --output table - # Upload state.txt file + echo 
"blob://${AZURE_STORAGE_ACCOUNT}/${AZURE_CONTAINER_NAME}/${CLOUD_BACKUP_FILE}" >${LOCAL_STATE_FILE} az storage blob upload \ - --container-name $AZURE_CONTAINER_NAME \ - --file $stateFile \ - --name database/state.txt \ + --container-name ${AZURE_CONTAINER_NAME} \ + --file ${LOCAL_STATE_FILE} \ + --name ${BACKUP_CLOUD_FOLDER}/state.txt \ --output table + ;; + esac +} + +backupDB() { + local LOCAL_BACKUP_FILE=${BACKUP_CLOUD_FILE}.sql.gz + local CLOUD_BACKUP_FILE="${BACKUP_CLOUD_FOLDER}/${BACKUP_CLOUD_FILE}.sql.gz" + if [ "$SET_DATE_AT_NAME" == "true" ]; then + local CURRENT_DATE=$(date '+%Y%m%d-%H%M') + LOCAL_BACKUP_FILE="${BACKUP_CLOUD_FILE}-${CURRENT_DATE}.sql.gz" + CLOUD_BACKUP_FILE="${BACKUP_CLOUD_FOLDER}/${BACKUP_CLOUD_FILE}-${CURRENT_DATE}.sql.gz" fi -fi -# Restoring DataBase -if [ "$DB_ACTION" == "restore" ]; then - # AWS - flag=true + # Backup database with max compression + echo "Backing up DB ${POSTGRES_DB} into ${LOCAL_BACKUP_FILE}" + pg_dump -h ${POSTGRES_HOST} -U ${POSTGRES_USER} ${POSTGRES_DB} | gzip -9 >${LOCAL_BACKUP_FILE} + + # Handle cloud storage based on the provider + cloudStorageOps "${LOCAL_BACKUP_FILE}" "${CLOUD_BACKUP_FILE}" +} + +restoreDB() { + local CURRENT_DATE=$(date '+%Y%m%d-%H%M') + local RESTORE_FILE="backup.sql.gz" + local LOG_RESULT_FILE="restore_results-${CURRENT_DATE}.log" + local flag=true + while "$flag" = true; do - pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue - flag=false - wget -O $restoreFile $RESTORE_URL_FILE - gunzip <$restoreFile | psql -h $POSTGRES_HOST -U $POSTGRES_USER -d $POSTGRES_DB - echo " Import data to $POSTGRES_DB has finished ..." 
+ pg_isready -h ${POSTGRES_HOST} -p 5432 >/dev/null 2>&2 || continue + flag=false + wget -O ${RESTORE_FILE} ${RESTORE_URL_FILE} + echo "Restoring ${RESTORE_URL_FILE} in ${POSTGRES_DB}" + gunzip -c <${RESTORE_FILE} | psql -h ${POSTGRES_HOST} -U ${POSTGRES_USER} -d ${POSTGRES_DB} | tee ${LOG_RESULT_FILE} + # aws s3 cp ${LOG_RESULT_FILE} s3://${AWS_S3_BUCKET}/${LOG_RESULT_FILE} + echo "Import data to ${POSTGRES_DB} has finished ..." + done +} + +delete_old_s3_files() { + # Use RETENTION_DAYS from environment variable or default to 30 days + if [ -z "${RETENTION_DAYS}" ]; then + DAYS_AGO=30 + else + DAYS_AGO="${RETENTION_DAYS}" + fi + + echo "Files older than $DAYS_AGO days will be deleted." + echo "Processing s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/" + TARGET_DATE=$(date -d "${DAYS_AGO} days ago" +%Y-%m-%d) + aws s3 ls "s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/" --recursive | while read -r line; do + FILE_DATE=$(echo "$line" | awk '{print $1}') + FILE_PATH=$(echo "$line" | awk '{print $4}') + if [[ "$FILE_DATE" < "$TARGET_DATE" && ! -z "$FILE_PATH" ]]; then + echo "Deleting ${FILE_PATH} which was modified on ${FILE_DATE}" + aws s3 rm "s3://${AWS_S3_BUCKET}/${FILE_PATH}" + fi done +} + +# Main logic +case "${DB_ACTION}" in +backup) + backupDB + ;; +restore) + restoreDB + ;; +*) + echo "Unknown action: ${DB_ACTION}" + exit 1 + ;; +esac + +# Check for the CLEAN_BACKUPS var +if [ "$CLEANUP_BACKUPS" == "true" ]; then + delete_old_s3_files +else + echo "CLEANUP_BACKUPS is not set to true. Skipping deletion." 
fi diff --git a/images/db/Dockerfile b/images/db/Dockerfile index c9a5c1e7..a7e6a566 100644 --- a/images/db/Dockerfile +++ b/images/db/Dockerfile @@ -1,25 +1,18 @@ -FROM postgres:11 -RUN rm /etc/apt/sources.list.d/pgdg.list -RUN sed -i s/deb.debian.org/archive.debian.org/g /etc/apt/sources.list -RUN sed -i 's|security.debian.org|archive.debian.org|g' /etc/apt/sources.list -RUN sed -i '/stretch-updates/d' /etc/apt/sources.list -RUN apt-get update && apt-get -y install apt-transport-https -RUN echo "deb [ signed-by=/usr/local/share/keyrings/postgres.gpg.asc ] https://apt-archive.postgresql.org/pub/repos/apt/ stretch-pgdg main 11" >/etc/apt/sources.list.d/pgdg.list +FROM postgres:14 RUN apt-get update \ && apt-get install -y \ - postgresql-server-dev-11 \ + postgresql-server-dev-14 \ make \ build-essential \ - postgresql-11-postgis-2.5 \ + postgresql-14-postgis-3 \ && apt-get clean && rm -rf /var/lib/apt/lists/* - + ADD functions/functions.sql /usr/local/share/osm-db-functions.sql ADD docker_postgres.sh /docker-entrypoint-initdb.d/ RUN mkdir -p db RUN mkdir -p lib ADD functions/ db/functions/ ADD lib/quad_tile/ lib/quad_tile/ - RUN make -C db/functions/ RUN chown -R postgres lib/ -RUN chown -R postgres db/ +RUN chown -R postgres db/ \ No newline at end of file diff --git a/images/full-history/README.md b/images/full-history/README.md index 58900aa2..0d841e22 100644 --- a/images/full-history/README.md +++ b/images/full-history/README.md @@ -12,18 +12,8 @@ In order to run this container we need environment variables, these can be found **Note**: Rename the above files as `.env.db`, `.env.db-utils` and `.env.cloudprovider` -#### Running full-history container - +### Build and bring up the container ```sh - # Docker compose - docker-compose run full-history - - # Docker - docker run \ - --env-file ./envs/.env.db \ - --env-file ./envs/.env.full-history \ - --env-file ./envs/.env.cloudprovider \ - -v ${PWD}/data/full-history-data:/mnt/data \ - --network osm-seed_default \ - 
-it osmseed-full-history:v1 +docker compose -f ./compose/planet.yml build +docker compose -f ./compose/planet.yml up full-history ``` diff --git a/images/osmcha-db/Dockerfile b/images/osmcha-db/Dockerfile new file mode 100644 index 00000000..7fcb57d9 --- /dev/null +++ b/images/osmcha-db/Dockerfile @@ -0,0 +1,9 @@ +FROM postgres:14 +# Install PostGIS +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + postgis \ + postgresql-14-postgis-3 && \ + rm -rf /var/lib/apt/lists/* +# Initialize PostGIS +COPY init-postgis.sql /docker-entrypoint-initdb.d/ diff --git a/images/osmcha-db/README.md b/images/osmcha-db/README.md new file mode 100644 index 00000000..d751f703 --- /dev/null +++ b/images/osmcha-db/README.md @@ -0,0 +1,12 @@ +# Postgres 14 for OpenStreetMap Changeset Analyzer (OSMCha) + +## Build +```sh +docker compose -f /compose/osmcha.yml build +``` + +## Run + +```sh +docker compose -f /compose/osmcha.yml up +``` \ No newline at end of file diff --git a/images/osmcha-db/init-postgis.sql b/images/osmcha-db/init-postgis.sql new file mode 100644 index 00000000..576e5426 --- /dev/null +++ b/images/osmcha-db/init-postgis.sql @@ -0,0 +1 @@ +CREATE EXTENSION IF NOT EXISTS postgis; diff --git a/images/osmcha-web/Dockerfile b/images/osmcha-web/Dockerfile new file mode 100644 index 00000000..04867f44 --- /dev/null +++ b/images/osmcha-web/Dockerfile @@ -0,0 +1,13 @@ +FROM node:16-slim as builder +ENV DEBIAN_FRONTEND noninteractive +ENV BUILD_ENV=prod +ENV REACT_APP_PRODUCTION_API_URL=/api/v1 +RUN apt-get update && apt-get install -y git \ + && rm -rf /var/lib/apt/lists/* +RUN mkdir /app +WORKDIR /app +ARG GIT_TAG=master +RUN git clone --branch $GIT_TAG https://github.com/osmus/osmcha-frontend.git /app +RUN yarn install +COPY start.sh . 
+CMD [ "/app/start.sh" ] diff --git a/images/osmcha-web/start.sh b/images/osmcha-web/start.sh new file mode 100755 index 00000000..661b6d28 --- /dev/null +++ b/images/osmcha-web/start.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -x +export BUILD_ENV=prod +export REACT_APP_PRODUCTION_API_URL=/api/v1 +sed -i "s|https://osmcha.org|$OSMCHA_URL|g" package.json +yarn build:${BUILD_ENV} +find /app/build -type f -exec sed -i "s/www.openstreetmap.org/$OSMCHA_API_URL/g" {} + +cp -r /app/build/* /assets/ diff --git a/images/overpass-api/Dockerfile b/images/overpass-api/Dockerfile index 9528668b..de488a70 100644 --- a/images/overpass-api/Dockerfile +++ b/images/overpass-api/Dockerfile @@ -1,4 +1,4 @@ -FROM wiktorn/overpass-api:0.7.56.8 +FROM wiktorn/overpass-api:latest COPY docker-entrypoint.sh /app/ RUN chmod a+rx /app/docker-entrypoint.sh CMD ["/app/docker-entrypoint.sh"] diff --git a/images/overpass-api/docker-entrypoint.sh b/images/overpass-api/docker-entrypoint.sh index 243ddac6..60b94687 100644 --- a/images/overpass-api/docker-entrypoint.sh +++ b/images/overpass-api/docker-entrypoint.sh @@ -1,119 +1,124 @@ #!/bin/bash -set -eo pipefail +set -eox pipefail shopt -s nullglob OVERPASS_META=${OVERPASS_META:-no} OVERPASS_MODE=${OVERPASS_MODE:-clone} OVERPASS_COMPRESSION=${OVERPASS_COMPRESSION:-gz} OVERPASS_FLUSH_SIZE=${OVERPASS_FLUSH_SIZE:-16} -OVERPASS_CLONE_SOURCE=${OVERPASS_CLONE_SOURCE:-http://dev.overpass-api.de/api_drolbr/} +OVERPASS_CLONE_SOURCE=${OVERPASS_CLONE_SOURCE:-https://dev.overpass-api.de/api_drolbr/} # this is used by other processes, so needs to be exported export OVERPASS_MAX_TIMEOUT=${OVERPASS_MAX_TIMEOUT:-1000s} -if [[ "$OVERPASS_META" == "attic" ]] ; then - META="--keep-attic" -elif [[ "${OVERPASS_META}" == "yes" ]] ; then - META="--meta" +if [[ "$OVERPASS_META" == "attic" ]]; then + META="--keep-attic" +elif [[ "${OVERPASS_META}" == "yes" ]]; then + META="--meta" else - META="" + META="" fi for f in /docker-entrypoint-initdb.d/*; do - case "$f" in - 
*.sh) - if [[ -x "$f" ]]; then - echo "$0: running $f" - "$f" - else - echo "$0: sourcing $f" - . "$f" - fi - ;; - *) echo "$0: ignoring $f" ;; - esac - echo + case "$f" in + *.sh) + if [[ -x "$f" ]]; then + echo "$0: running $f" + "$f" + else + echo "$0: sourcing $f" + # shellcheck disable=SC1090 # ignore SC1090 (unable to follow file) because they are dynamically provided + . "$f" + fi + ;; + *) echo "$0: ignoring $f" ;; + esac + echo done -function startAPIServer(){ - envsubst '${OVERPASS_MAX_TIMEOUT}' < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf - echo "Starting supervisord process" - exec /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf -} +if [[ ! -f /db/init_done ]]; then + echo "No database directory. Initializing" + if [[ "${USE_OAUTH_COOKIE_CLIENT}" = "yes" ]]; then + /app/venv/bin/python /app/bin/oauth_cookie_client.py -o /db/cookie.jar -s /secrets/oauth-settings.json --format netscape + # necessary to add newline at the end as oauth_cookie_client doesn't do that + echo >>/db/cookie.jar + else + echo "# Netscape HTTP Cookie File" >/db/cookie.jar + echo "${OVERPASS_COOKIE_JAR_CONTENTS}" >>/db/cookie.jar + fi + chown overpass /db/cookie.jar -if [[ ! -f /db/init_done ]] ; then - echo "No database directory. 
Initializing" - if [[ "${USE_OAUTH_COOKIE_CLIENT}" = "yes" ]]; then - /app/venv/bin/python /app/bin/oauth_cookie_client.py -o /db/cookie.jar -s /secrets/oauth-settings.json --format netscape - # necessary to add newline at the end as oauth_cookie_client doesn't do that - echo >> /db/cookie.jar - else - echo "# Netscape HTTP Cookie File" > /db/cookie.jar - echo "${OVERPASS_COOKIE_JAR_CONTENTS}" >> /db/cookie.jar - fi - chown overpass /db/cookie.jar + if [[ "$OVERPASS_MODE" = "clone" ]]; then + ( + mkdir -p /db/db && + /app/bin/download_clone.sh --db-dir=/db/db --source="${OVERPASS_CLONE_SOURCE}" --meta="${OVERPASS_META}" && + cp /db/db/replicate_id /db/replicate_id && + cp -r /app/etc/rules /db/db && + chown -R overpass:overpass /db/* && + touch /db/init_done + ) || ( + echo "Failed to clone overpass repository" + exit 1 + ) + if [[ "${OVERPASS_STOP_AFTER_INIT}" == "false" ]]; then + echo "Overpass container ready to receive requests" + else + echo "Overpass container initialization complete. Exiting." + exit 0 + fi + fi - if [[ "$OVERPASS_MODE" = "clone" ]]; then - mkdir -p /db/db \ - && /app/bin/download_clone.sh --db-dir=/db/db --source="${OVERPASS_CLONE_SOURCE}" --meta="${OVERPASS_META}" \ - && cp /db/db/replicate_id /db/replicate_id \ - && cp -r /app/etc/rules /db/db \ - && chown -R overpass:overpass /db \ - && touch /db/init_done \ - && echo "Overpass ready, you can start your container with docker start" - exit - fi - - if [[ "$OVERPASS_MODE" = "init" ]]; then - while true ; do - CURL_STATUS_CODE=$(curl -L -b /db/cookie.jar -o /db/planet.osm.bz2 -w "%{http_code}" "${OVERPASS_PLANET_URL}") - case "${CURL_STATUS_CODE}" in - 429) - echo "Server responded with 429 Too many requests. Trying again in 5 minutes..." - sleep 300 - continue - ;; - # for `file:///` scheme curl returns `000` HTTP status code - 200 | 000) - ( - if [[ ! 
-z "${OVERPASS_PLANET_PREPROCESS+x}" ]]; then - echo "Running preprocessing command: ${OVERPASS_PLANET_PREPROCESS}" - eval "${OVERPASS_PLANET_PREPROCESS}" - fi \ - && /app/bin/init_osm3s.sh /db/planet.osm.bz2 /db/db /app "${META}" "--version=$(osmium fileinfo -e -g data.timestamp.last /db/planet.osm.bz2) --compression-method=${OVERPASS_COMPRESSION} --map-compression-method=${OVERPASS_COMPRESSION} --flush-size=${OVERPASS_FLUSH_SIZE}" \ - && echo "Database created. Now updating it." \ - && cp -r /app/etc/rules /db/db \ - && chown -R overpass:overpass /db \ - && echo "Updating" \ - && /app/bin/update_overpass.sh "-O /db/planet.osm.bz2" \ - && /app/bin/osm3s_query --progress --rules --db-dir=/db/db < /db/db/rules/areas.osm3s \ - && touch /db/init_done \ - && rm /db/planet.osm.bz2 \ - && chown -R overpass:overpass /db \ - && echo $OVERPASS_REPLICATION_SEQUENCE_NUMBER > /db/replicate_id \ - && chmod 777 /db/replicate_id \ - && echo "Overpass ready, you can start your container with docker start" \ - && startAPIServer - ) || ( - echo "Failed to process planet file" - exit - ) - ;; - 403) - echo "Access denied when downloading planet file. Check your OVERPASS_PLANET_URL and OVERPASS_COOKIE_JAR_CONTENTS or USE_OAUTH_COOKIE_CLIENT" - cat /db/cookie.jar - exit - ;; - *) - echo "Failed to download planet file. HTTP status code: ${CURL_STATUS_CODE}" - cat /db/planet.osm.bz2 - exit - ;; - esac - exit - done - fi + if [[ "$OVERPASS_MODE" = "init" ]]; then + CURL_STATUS_CODE=$(curl -L -b /db/cookie.jar -o /db/planet.osm.bz2 -w "%{http_code}" "${OVERPASS_PLANET_URL}") + # try again until it's allowed + while [ "$CURL_STATUS_CODE" = "429" ]; do + echo "Server responded with 429 Too many requests. Trying again in 5 minutes..." 
+ sleep 300 + CURL_STATUS_CODE=$(curl -L -b /db/cookie.jar -o /db/planet.osm.bz2 -w "%{http_code}" "${OVERPASS_PLANET_URL}") + done + # for `file:///` scheme curl returns `000` HTTP status code + if [[ $CURL_STATUS_CODE = "200" || $CURL_STATUS_CODE = "000" ]]; then + ( + if [[ -n "${OVERPASS_PLANET_PREPROCESS+x}" ]]; then + echo "Running preprocessing command: ${OVERPASS_PLANET_PREPROCESS}" + eval "${OVERPASS_PLANET_PREPROCESS}" + fi && + /app/bin/init_osm3s.sh /db/planet.osm.bz2 /db/db /app "${META}" "--version=$(osmium fileinfo -e -g data.timestamp.last /db/planet.osm.bz2) --compression-method=${OVERPASS_COMPRESSION} --map-compression-method=${OVERPASS_COMPRESSION} --flush-size=${OVERPASS_FLUSH_SIZE}" && + echo "Database created. Now updating it." && + cp -r /app/etc/rules /db/db && + chown -R overpass:overpass /db/* && + echo "Updating" && + /app/bin/update_overpass.sh -O /db/planet.osm.bz2 && + if [[ "${OVERPASS_USE_AREAS}" = "true" ]]; then + echo "Generating areas..." && /app/bin/osm3s_query --progress --rules --db-dir=/db/db /etc/nginx/nginx.conf + +echo "Starting supervisord process" +exec /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf \ No newline at end of file diff --git a/images/planet-dump/README.md b/images/planet-dump/README.md index bc6f7031..bbb7ea17 100644 --- a/images/planet-dump/README.md +++ b/images/planet-dump/README.md @@ -12,18 +12,8 @@ In order to run this container we need environment variables, these can be found **Note**: Rename the above files as `.env.db`, `.env.db-utils` and `.env.cloudprovider` -#### Running planet-dump container - +### Build and bring up the container ```sh - # Docker compose - docker-compose run planet-dump - - # Docker - docker run \ - --env-file ./envs/.env.db \ - --env-file ./envs/.env.planet-dump \ - --env-file ./envs/.env.cloudprovider \ - -v ${PWD}/data/planet-dump-data:/mnt/data \ - --network osm-seed_default \ - -it osmseed-planet-dump:v1 +docker compose -f ./compose/planet.yml build 
+docker compose -f ./compose/planet.yml up planet-dump ``` diff --git a/images/planet-files/Dockerfile b/images/planet-files/Dockerfile new file mode 100644 index 00000000..4f9aa907 --- /dev/null +++ b/images/planet-files/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:latest +RUN apt-get update && \ + apt-get install -y nodejs npm && \ + rm -rf /var/lib/apt/lists/* +WORKDIR /usr/share/nginx/html/server/ +COPY server/package.json ./ +RUN npm install +COPY server/server.js ./ +COPY index.html /usr/share/nginx/html/ +CMD ["sh", "-c", "nginx -g 'daemon off;' & node server.js"] diff --git a/images/planet-files/README.MD b/images/planet-files/README.MD new file mode 100644 index 00000000..79407e01 --- /dev/null +++ b/images/planet-files/README.MD @@ -0,0 +1,11 @@ + +# Planet server files + +This container is based on Nginx and serves data related to planet, replication, and changesets for easy access and download, similar to how it is done in OpenStreetMap. + + +# Build and bring up the container +```sh +docker compose -f ./compose/planet.yml build +docker compose -f ./compose/planet.yml up planet-files +``` \ No newline at end of file diff --git a/images/planet-files/index.html b/images/planet-files/index.html new file mode 100644 index 00000000..4c1eac05 --- /dev/null +++ b/images/planet-files/index.html @@ -0,0 +1,105 @@ + + + + + + Planet Replication + + + +
+ + + diff --git a/images/planet-files/server/package.json b/images/planet-files/server/package.json new file mode 100644 index 00000000..f91d0fca --- /dev/null +++ b/images/planet-files/server/package.json @@ -0,0 +1,12 @@ +{ + "name": "websocket-server", + "version": "1.0.0", + "main": "server.js", + "scripts": { + "start": "node server.js" + }, + "dependencies": { + "cors": "^2.8.5", + "express": "^4.17.1" + } +} diff --git a/images/planet-files/server/server.js b/images/planet-files/server/server.js new file mode 100644 index 00000000..227a23b8 --- /dev/null +++ b/images/planet-files/server/server.js @@ -0,0 +1,49 @@ +const express = require("express"); +const cors = require("cors"); +const fs = require("fs"); +const path = require("path"); +const app = express(); +const port = 3000; + +app.use(cors()); +app.get("/:directory(*)", (req, res) => { + const directory = req.params.directory; + const files = getFilesRecursive(path.join("./static-files", directory)); + res.json(files); +}); + +function formatSize(sizeInBytes) { + const KB = 1024; + const MB = KB * 1024; + if (sizeInBytes < KB) { + return (sizeInBytes / KB).toFixed(1) + " kB"; + } else if (sizeInBytes < MB) { + const sizeInKB = (sizeInBytes / KB).toFixed(2); + return sizeInKB + " kB"; + } else { + const sizeInMB = (sizeInBytes / MB).toFixed(2); + return sizeInMB + " mB"; + } + } + +function getFilesRecursive(dir) { + const files = fs.readdirSync(dir); + return files.map((file) => { + const filePath = path.join(dir, file); + const stats = fs.statSync(filePath); + const isDirectory = stats.isDirectory(); + const size = isDirectory ? null : formatSize(stats.size); + const creationDate = isDirectory ? 
null : stats.mtime.toISOString(); + return { + name: file, + isDirectory, + path: filePath, + size, + creationDate, + }; + }); +} + +app.listen(port, () => { + console.log(`Server is listening at http://localhost:${port}`); +}); diff --git a/images/replication-job/Dockerfile b/images/replication-job/Dockerfile index 2b90ac07..8683fb42 100644 --- a/images/replication-job/Dockerfile +++ b/images/replication-job/Dockerfile @@ -1,7 +1,12 @@ FROM developmentseed/osmseed-osm-processor:v2 +# Install Nginx +RUN apt-get update && \ + apt-get install -y nginx && \ + rm -rf /var/lib/apt/lists/* + RUN pip3 install boto3 -COPY ./start.sh / +COPY ./*.sh / COPY monitoring.py / WORKDIR /mnt/data CMD /start.sh diff --git a/images/replication-job/liveness.sh b/images/replication-job/liveness.sh new file mode 100755 index 00000000..c343c24e --- /dev/null +++ b/images/replication-job/liveness.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# This is a script for the complex evaluation of whether Osmosis or other processes are running in the container. +if [ $(ps -ef | grep -E 'java' | grep -v grep | wc -l) -ge 1 ]; then + echo "Osmosis is running." + exit 0 +else + echo "Osmosis is not running!" 1>&2 + exit 1 +fi diff --git a/images/replication-job/start.sh b/images/replication-job/start.sh index 0435ca85..5653eb7a 100755 --- a/images/replication-job/start.sh +++ b/images/replication-job/start.sh @@ -11,44 +11,101 @@ fi workingDirectory="/mnt/data" mkdir -p $workingDirectory -# Check if state.txt exist in the workingDirectory, -# in case the file does not exist locally and does not exist in the cloud the replication will start from 0 -if [ ! -f $workingDirectory/state.txt ]; then - echo "File $workingDirectory/state.txt does not exist in local storage" - ### AWS - if [ $CLOUDPROVIDER == "aws" ]; then - aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt - if [[ $? 
-eq 0 ]]; then - echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET" - aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt +# Remove files that are not required +[ -e /mnt/data/replicate.lock ] && rm -f /mnt/data/replicate.lock +# [ -e /mnt/data/processed_files.log ] && rm -f /mnt/data/processediles.log + +function get_current_state_file() { + # Check if state.txt exist in the workingDirectory, + # in case the file does not exist locally and does not exist in the cloud the replication will start from 0 + if [ ! -f $workingDirectory/state.txt ]; then + echo "File $workingDirectory/state.txt does not exist in local storage" + ### AWS + if [ $CLOUDPROVIDER == "aws" ]; then + aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt + if [[ $? -eq 0 ]]; then + echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET" + aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt + fi fi - fi - - ### GCP - if [ $CLOUDPROVIDER == "gcp" ]; then - gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt - if [[ $? -eq 0 ]]; then - echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET" - gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt + + ### GCP + if [ $CLOUDPROVIDER == "gcp" ]; then + gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt + if [[ $? 
-eq 0 ]]; then + echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET" + gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt + fi fi - fi - - ### Azure - if [ $CLOUDPROVIDER == "azure" ]; then - state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists") - if [[ $state_file_exists=="true" ]]; then - echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME" - az storage blob download \ - --container-name $AZURE_CONTAINER_NAME \ - --name $REPLICATION_FOLDER/state.txt \ - --file $workingDirectory/state.txt --query="name" + + ### Azure + if [ $CLOUDPROVIDER == "azure" ]; then + state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists") + if [[ $state_file_exists=="true" ]]; then + echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME" + az storage blob download \ + --container-name $AZURE_CONTAINER_NAME \ + --name $REPLICATION_FOLDER/state.txt \ + --file $workingDirectory/state.txt --query="name" + fi fi fi - mkdir -p $workingDirectory -fi +} + +function upload_file_cloud() { + # Upload files to cloud provider + local local_file="$1" + local cloud_file="$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"}" + echo "$(date +%F_%H:%M:%S): Upload file $local_file to ...$CLOUDPROVIDER...$cloud_file" + if [ "$CLOUDPROVIDER" == "aws" ]; then + aws s3 cp "$local_file" "$AWS_S3_BUCKET/$cloud_file" --acl public-read + elif [ "$CLOUDPROVIDER" == "gcp" ]; then + gsutil cp -a public-read "$local_file" "$GCP_STORAGE_BUCKET/$cloud_file" + elif [ "$CLOUDPROVIDER" == "azure" ]; then + az storage blob upload \ + --container-name "$AZURE_CONTAINER_NAME" \ + --file "$local_file" \ + --name "$cloud_file" \ + --output none + fi +} + +function monitor_minute_replication() { + # Function to handle continuous monitoring, minutminutes replication 
and upload to cloud provider + # Directory to store a log of processed files + processed_files_log="$workingDirectory/processed_files.log" + max_log_size_mb=1 + + while true; do + upload_file_cloud /mnt/data/state.txt + sleep 60s + done & -# Creating the replication files -function generateReplication() { + while true; do + if [ -e "$processed_files_log" ]; then + log_size=$(du -m "$processed_files_log" | cut -f1) + if [ "$log_size" -gt "$max_log_size_mb" ]; then + echo $(date +%F_%H:%M:%S)": Cleaning processed_files_log..." >"$processed_files_log" + fi + for local_minute_file in $(find $workingDirectory/ -cmin -1); do + if [ -f "$local_minute_file" ]; then + if grep -q "$local_minute_file" "$processed_files_log"; then + continue + fi + upload_file_cloud $local_minute_file + echo "$local_minute_file" >>"$processed_files_log" + fi + done + else + echo "File $processed_files_log not found." + echo $processed_files_log >$processed_files_log + fi + sleep 10s + done +} + +function generate_replication() { + # Replicate the API database using Osmosis osmosis -q \ --replicate-apidb \ iterations=0 \ @@ -60,45 +117,32 @@ function generateReplication() { password=$POSTGRES_PASSWORD \ validateSchemaVersion=no \ --write-replication \ - workingDirectory=$workingDirectory & - while true; do - for local_file in $(find $workingDirectory/ -cmin -1); do - if [ -f "$local_file" ]; then - - cloud_file=$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"} - echo $(date +%F_%H:%M:%S)": Copy file...$local_file to $cloud_file" - - ### AWS - if [ $CLOUDPROVIDER == "aws" ]; then - aws s3 cp $local_file $AWS_S3_BUCKET/$cloud_file --acl public-read - fi - - ### GCP - if [ $CLOUDPROVIDER == "gcp" ]; then - #TODO, emable public acces - gsutil cp -a public-read $local_file $GCP_STORAGE_BUCKET/$cloud_file - fi - - ### Azure - if [ $CLOUDPROVIDER == "azure" ]; then - #TODO, emable public acces - az storage blob upload \ - --container-name $AZURE_CONTAINER_NAME \ - --file $local_file \ - --name 
$cloud_file \ - --output none - fi - fi - done - sleep 15s - done + workingDirectory=$workingDirectory } -# Check if Postgres is ready +# function start_nginx() { +# if [ "$STAR_NGINX_SERVER" = "true" ]; then +# echo 'server { +# listen 8080; +# server_name localhost; + +# location / { +# root /mnt/data; +# index index.html; +# } +# }' >/etc/nginx/nginx.conf +# service nginx restart +# else +# echo "STAR_NGINX_SERVER is either not set or not set to true." +# fi +# } + +######################## Start minutes replication process ######################## +get_current_state_file flag=true while "$flag" = true; do pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue - # Change flag to false to stop ping the DB flag=false - generateReplication + generate_replication & + monitor_minute_replication done diff --git a/images/taginfo/Dockerfile b/images/taginfo/Dockerfile index 1721e262..9c3a0fb7 100644 --- a/images/taginfo/Dockerfile +++ b/images/taginfo/Dockerfile @@ -1,25 +1,9 @@ -FROM ruby:2.7 -ENV workdir /apps +FROM ruby:3.0-slim -# Install Taginfo site -RUN apt-get update && apt-get -y install \ - curl \ - sqlite3 \ - sqlite3-pcre \ - ruby-passenger \ - libapache2-mod-passenger \ - git - -# Commit ae5a950f7aa4c0de4e706839619a1dc05fc4450a, at 2021-10-18 -RUN git clone https://github.com/taginfo/taginfo.git $workdir/taginfo -WORKDIR $workdir/taginfo -RUN git checkout ae5a950f7aa4c0de4e706839619a1dc05fc4450a -RUN echo "gem 'thin' " >>Gemfile -RUN gem install bundler -RUN bundle install - -# Install Taginfo tools -RUN apt-get -y install \ +ARG workdir=/usr/src/app +WORKDIR $workdir + +RUN apt-get update && apt-get install -y \ cmake \ libbz2-dev \ libexpat1-dev \ @@ -30,26 +14,60 @@ RUN apt-get -y install \ libsqlite3-dev \ make \ zlib1g-dev \ + curl \ + sqlite3 \ + sqlite3-pcre \ + passenger \ + libapache2-mod-passenger \ + libreadline-dev \ + tcl \ + git \ jq \ - ca-certificates + python3-pip \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* -# 
Other useful packages -RUN apt-get install -y \ - git \ - osmium-tool \ - pyosmium \ - rsync \ - tmux \ - zsh - -RUN git clone https://github.com/taginfo/taginfo-tools.git $workdir/taginfo-tools -WORKDIR $workdir/taginfo-tools -RUN git submodule update --init -RUN mkdir build && cd build && cmake .. && make - -RUN apt-get install -y nano vim -COPY overwrite_config.py $workdir/ -COPY start.sh $workdir/ - -WORKDIR $workdir/ -CMD $workdir/start.sh \ No newline at end of file +RUN git clone https://github.com/taginfo/taginfo-tools.git $workdir/taginfo-tools && \ + cd $workdir/taginfo-tools && \ + git submodule update --init && \ + mkdir build && cd build && \ + cmake .. && make +RUN gem install json rack-contrib puma sinatra:'<3' sinatra-r18n:'5.0.2' + +# Install AWS CLI +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install && \ + rm awscliv2.zip + +RUN git clone https://github.com/taginfo/taginfo.git $workdir/taginfo && \ + cd $workdir/taginfo && \ + bundle install + +# Download and prepare uWSGI +RUN wget http://projects.unbit.it/downloads/uwsgi-latest.tar.gz \ + && tar zxvf uwsgi-latest.tar.gz \ + && rm uwsgi-latest.tar.gz && \ + mv $(ls | grep uwsgi-) uwsgi-dir && \ + cd uwsgi-dir && \ + make PROFILE=nolang && \ + PYTHON=python3 UWSGI_PROFILE_OVERRIDE="ssl=true,rack=true,http=true" make + +# # Set non-root user +# RUN groupadd -r taginfo && useradd -m -r -g taginfo taginfo +# RUN chown -R taginfo:taginfo $workdir +# RUN chmod -R 777 $workdir +# USER taginfo + +# RUN chown -R www-data: $workdir + +WORKDIR $workdir + +# Copy configuration files and scripts +COPY config/taginfo-config.json $workdir/ +COPY start.sh . 
+ +# Expose port and set default command +EXPOSE 80 +CMD ["./start.sh"] diff --git a/images/taginfo/config/taginfo-config.json b/images/taginfo/config/taginfo-config.json new file mode 100644 index 00000000..3350e324 --- /dev/null +++ b/images/taginfo/config/taginfo-config.json @@ -0,0 +1,75 @@ +{ + "instance": { + "url": "http://localhost:80", + "name": "OpenStreetMap Taginfo", + "description": "This is a taginfo test instance. Change this text in your taginfo-config.json.", + "about": "

This site is maintained by osm-seed", + "icon": "/img/logo/test.png", + "contact": "info@osm-seed.org", + "area": "World", + "access_control_allow_origin": "*", + "sections": ["download", "taginfo"] + }, + "geodistribution": { + "left": -180, + "bottom": -90, + "right": 180, + "top": 90, + "width": 360, + "height": 180, + "scale_image": 2, + "scale_compare_image" : 1, + "background_image": "/img/mapbg/world.png", + "image_attribution": "osm-seed" + }, + "paths": { + "data_dir": "/usr/src/app/data", + "download_dir": "/usr/src/app/taginfo/web/public/download", + "bin_dir": "/usr/src/app/taginfo-tools/build/src", + "sqlite3_pcre_extension": "/usr/lib/sqlite3/pcre.so" + }, + "xapi": { + "max_results": 1000, + "url_prefix": "https://overpass-api.de/api/xapi_meta?" + }, + "turbo": { + "max_auto": 1000, + "url_prefix": "https://overpass-turbo.eu/?", + "wizard_area": "global" + }, + "level0": { + "max_results": 50, + "overpass_url_prefix": "https://overpass-api.de/api/interpreter?", + "level0_url_prefix": "http://level0.osmz.ru/?" 
+ }, + "opensearch": { + "shortname": "Taginfo Test Instance", + "contact": "somebody@example.com", + "description": "Find metadata about OpenStreetMap tags", + "tags": "osm openstreetmap tag tags taginfo" + }, + "sources": { + "download": "languages wiki wikidata", + "create": "db projects chronology", + "db": { + "planetfile": "/osm/planet/var/current-planet.osm.pbf", + "bindir": "/apps/data/update/build/src" + }, + "chronology": { + "osm_history_file": "/osm/planet/var/current-history-planet.osh.pbf" + }, + "master": { + "min_count_tags": 10, + "min_tag_combination_count": 10, + "min_count_for_map": 10, + "min_count_relations_per_type": 10 + } + }, + "logging": { + "directory": "/usr/src/app/logs", + "min_duration": 0.1 + }, + "tagstats": { + "geodistribution": "FlexMem" + } +} diff --git a/images/taginfo/overwrite_config.py b/images/taginfo/overwrite_config.py deleted file mode 100644 index 8ef2310a..00000000 --- a/images/taginfo/overwrite_config.py +++ /dev/null @@ -1,45 +0,0 @@ -""" Script to overwrite values in set config - python3 overwrite_config.py \ - -u https://gist.githubusercontent.com/Rub21/1a82fb3e4c0efd15524709a5e2d8ab89/raw/23c399802ba2a01cc30379875ac02a7b1b5ac8e1/taginfo.json\ - -f taginfo-config.json -""" - -import argparse -import urllib.request -import json - -def main(config_file, overwrite_config_url): - with urllib.request.urlopen(overwrite_config_url) as url: - overwrite_values = json.loads(url.read()) - with open(config_file) as f: - current_values = json.loads(f.read()) - # Hardcode for certain values - if 'instance' in overwrite_values.keys(): - current_values['instance'] = overwrite_values['instance'] - if 'turbo' in overwrite_values.keys(): - current_values['turbo'] = overwrite_values['turbo'] - if 'sources' in overwrite_values.keys() and 'master' in overwrite_values['sources'].keys(): - current_values['sources']['master'] = overwrite_values['sources']['master'] - # Overwrite file - with open(config_file, 'w') as f: - 
f.write(json.dumps(current_values)) - - -parser = argparse.ArgumentParser(description='Set config values') -parser.add_argument( - '-u', - type=str, - help='URL of the config to overwrite', - dest='overwrite_config_url') - -parser.add_argument( - '-f' - '--config_file', - type=str, - help='Path of the config file', - dest='config_file') - -args = parser.parse_args() - -if args.config_file and args.overwrite_config_url: - main(args.config_file, args.overwrite_config_url) diff --git a/images/taginfo/start.sh b/images/taginfo/start.sh index 37a50f02..951d6a41 100755 --- a/images/taginfo/start.sh +++ b/images/taginfo/start.sh @@ -1,50 +1,23 @@ #!/usr/bin/env bash - -WORKDIR=/apps -DATA_DIR=$WORKDIR/data -UPDATE_DIR=$DATA_DIR/update -DOWNLOAD_DIR=$DATA_DIR/download - -set_taginfo_config() { - echo "Setting up...$WORKDIR/taginfo-config.json" - # Update dir values in taginfo-config.json - grep -v '^ *//' $WORKDIR/taginfo/taginfo-config-example.json | - jq '.logging.directory = "'$UPDATE_DIR'/log"' | - jq '.paths.download_dir = "'$UPDATE_DIR'/download"' | - jq '.paths.bin_dir = "'$WORKDIR'/taginfo-tools/build/src"' | - jq '.sources.db.planetfile = "'$UPDATE_DIR'/planet/planet.osm.pbf"' | - jq '.sources.chronology.osm_history_file = "'$UPDATE_DIR'/planet/history-planet.osh.pbf"' | - jq '.sources.db.bindir = "'$UPDATE_DIR'/build/src"' | - jq '.paths.data_dir = "'$DATA_DIR'"' \ - >$WORKDIR/taginfo-config.json - - # languages wiki databases will be downloaded from OSM - [[ ! -z $DOWNLOAD_DB+z} ]] && jq --arg a "${DOWNLOAD_DB}" '.sources.download = $a' $WORKDIR/taginfo-config.json >tmp.json && mv tmp.json $WORKDIR/taginfo-config.json - - # Update instance values in taginfo-config.json - python3 overwrite_config.py -u $OVERWRITE_CONFIG_URL -f $WORKDIR/taginfo-config.json - -} - -updates_create_db() { - local CREATE_DB="$1" - [[ ! 
-z $CREATE_DB+z} ]] && jq --arg a "${CREATE_DB}" '.sources.create = $a' $WORKDIR/taginfo-config.json >tmp.json && mv tmp.json $WORKDIR/taginfo-config.json -} +set -x +WORKDIR=/usr/src/app +DATADIR=/usr/src/app/data +DATADOWNLOAD=/osm/planet/var +mkdir -p $DATADIR/ +mkdir -p $DATADOWNLOAD/ +mkdir -p $DATADIR/update/log/ updates_source_code() { echo "Update...Procesor source code" + sed -i 's/"env -/"/g' $WORKDIR/taginfo/sources/util.sh + sed -i '/configure do/a \ \ \ \ set :port, 80' $WORKDIR/taginfo/web/taginfo.rb + sed -i "/configure do/a \ \ \ \ set :bind, '0.0.0.0'" $WORKDIR/taginfo/web/taginfo.rb # Function to replace the projects repo to get the projects information TAGINFO_PROJECT_REPO=${TAGINFO_PROJECT_REPO//\//\\/} sed -i -e 's/https:\/\/github.com\/taginfo\/taginfo-projects.git/'$TAGINFO_PROJECT_REPO'/g' $WORKDIR/taginfo/sources/projects/update.sh - # The follow line is requiered to avoid sqlite3 issues - sed -i -e 's/run_ruby "$SRCDIR\/update_characters.rb"/ruby "$SRCDIR\/update_characters.rb"/g' $WORKDIR/taginfo/sources/db/update.sh - sed -i -e 's/run_ruby "$SRCDIR\/import.rb"/ruby "$SRCDIR\/import.rb"/g' $WORKDIR/taginfo/sources/projects/update.sh - sed -i -e 's/run_ruby "$SRCDIR\/parse.rb"/ruby "$SRCDIR\/parse.rb"/g' $WORKDIR/taginfo/sources/projects/update.sh - sed -i -e 's/run_ruby "$SRCDIR\/get_icons.rb"/ruby "$SRCDIR\/get_icons.rb"/g' $WORKDIR/taginfo/sources/projects/update.sh } download_planet_files() { - mkdir -p $UPDATE_DIR/planet/ # Check if URL_PLANET_FILE_STATE exist and set URL_PLANET_FILE if [[ ${URL_PLANET_FILE_STATE} && ${URL_PLANET_FILE_STATE-x} ]]; then wget -q -O state.planet.txt --no-check-certificate - $URL_PLANET_FILE_STATE @@ -56,56 +29,67 @@ download_planet_files() { URL_HISTORY_PLANET_FILE=$(cat state.history.txt) fi # Download pbf files - echo "Downloading...$URL_PLANET_FILE" - wget -q -O $UPDATE_DIR/planet/planet.osm.pbf --no-check-certificate - $URL_PLANET_FILE - echo "Downloading...$URL_HISTORY_PLANET_FILE" - wget -q -O 
$UPDATE_DIR/planet/history-planet.osh.pbf --no-check-certificate - $URL_HISTORY_PLANET_FILE - rm state.planet.txt - rm state.history.txt + wget -O $DATADOWNLOAD/current-planet.osm.pbf $URL_PLANET_FILE + wget -O $DATADOWNLOAD/current-history-planet.osh.pbf $URL_HISTORY_PLANET_FILE } -update() { - echo "Update...sqlite databases at $(date +%Y-%m-%d:%H-%M)" - # Download OSM planet replication and full-history files +process_data() { download_planet_files - # In order to make it work we need to pass first one by one the creation and then all of them "db projects chronology" - for db in $CREATE_DB; do - echo "Update...taginfo-$db.db" - updates_create_db $db - $WORKDIR/taginfo/sources/update_all.sh $UPDATE_DIR + cd $WORKDIR/taginfo/sources/ + ./update_all.sh $DATADIR + db/update.sh $DATADIR + master/update.sh $DATADIR + projects/update.sh $DATADIR + cp $DATADIR/selection.db $DATADIR/../ + # languages/update.sh $DATADIR + # wiki/update.sh $DATADIR + # wikidata/update.sh $DATADIR + chronology/update.sh $DATADIR + ./update_all.sh $DATADIR + mv $DATADIR/*.db $DATADIR/ + mv $DATADIR/*/*.db $DATADIR/ + # if BUCKET_NAME is set upload data + if ! 
aws s3 ls "s3://$BUCKET_NAME/$ENVIRONMENT" 2>&1 | grep -q 'An error occurred'; then + aws s3 sync $DATADIR/ s3://$AWS_S3_BUCKET/$ENVIRONMENT/ --exclude "*" --include "*.db" + fi +} + +# Compress files to download +compress_files() { + mkdir -p download + for file in data/*; do + bzip2 -k -9 -c "$file" > "download/$(basename "$file").bz2" done - echo "Update...$CREATE_DB" - updates_create_db $CREATE_DB - $WORKDIR/taginfo/sources/update_all.sh $UPDATE_DIR - # Copy db files into data folder - cp $UPDATE_DIR/*/taginfo-*.db $DATA_DIR/ - cp $UPDATE_DIR/taginfo-*.db $DATA_DIR/ - # Link to download db zip files - chmod a=r $UPDATE_DIR/download - ln -sf $UPDATE_DIR/download $WORKDIR/taginfo/web/public/download } -start_web() { - echo "Start...Taginfo web service" - cd $WORKDIR/taginfo/web && bundle exec rackup --host 0.0.0.0 -p 80 +download_db_files() { + if ! aws s3 ls "s3://$AWS_S3_BUCKET/$ENVIRONMENT" 2>&1 | grep -q 'An error occurred'; then + aws s3 sync "s3://$AWS_S3_BUCKET/$ENVIRONMENT/" "$DATADIR/" + mv $DATADIR/*.db $DATADIR/ + mv $DATADIR/*/*.db $DATADIR/ + compress_files + fi } -continuous_update() { +sync_latest_db_version() { while true; do - update - sleep $TIME_UPDATE_INTERVAL + sleep "$INTERVAL_DOWNLOAD_DATA" + download_db_files done } -main() { - set_taginfo_config - updates_source_code - # Check if db files are store in the $DATA_DIR in order to start the service or start procesing the file - NUM_DB_FILES=$(ls $DATA_DIR/*.db | wc -l) - if [ $NUM_DB_FILES -lt 7 ]; then - update - fi - start_web & - continuous_update +start_web() { + echo "Start...Taginfo web service" + download_db_files + cd $WORKDIR/taginfo/web && ./taginfo.rb & sync_latest_db_version } -main + +ACTION=$1 +# Overwrite the config file +[[ ! 
-z ${OVERWRITE_CONFIG_URL} ]] && wget $OVERWRITE_CONFIG_URL -O /usr/src/app/taginfo-config.json +updates_source_code +if [ "$ACTION" = "web" ]; then + start_web + elif [ "$ACTION" = "data" ]; then + process_data +fi diff --git a/images/tiler-imposm/Dockerfile b/images/tiler-imposm/Dockerfile index 0c38c2d1..e99ccc28 100644 --- a/images/tiler-imposm/Dockerfile +++ b/images/tiler-imposm/Dockerfile @@ -19,12 +19,14 @@ RUN apt-get install -y \ curl \ wget \ unzip \ - software-properties-common + software-properties-common && \ + rm -rf /var/lib/apt/lists/* # # Install python RUN add-apt-repository ppa:deadsnakes/ppa && \ apt-get update && \ apt-get install -y build-essential python3.6 python3.6-dev python3-pip && \ + rm -rf /var/lib/apt/lists/* && \ python3 -m pip install pip --upgrade && \ python3 -m pip install wheel @@ -39,6 +41,8 @@ ENV PATH $PATH:/usr/local/go/bin WORKDIR /go ENV GOPATH /go RUN go get github.com/omniscale/imposm3 +WORKDIR $GOPATH/src/github.com/omniscale/imposm3 +RUN git checkout v0.11.1 RUN go install github.com/omniscale/imposm3/cmd/imposm ENV PATH $PATH:$GOPATH/bin @@ -53,7 +57,5 @@ WORKDIR $IMPOSMDATA VOLUME $IMPOSMDATA WORKDIR /osm -COPY config config -COPY scripts scripts -COPY start.sh start.sh -CMD ./start.sh +COPY . . +CMD ./start.sh \ No newline at end of file diff --git a/images/tiler-imposm/liveness.sh b/images/tiler-imposm/liveness.sh new file mode 100755 index 00000000..a7b249ea --- /dev/null +++ b/images/tiler-imposm/liveness.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +if ps aux | grep -v grep | grep "imposm" >/dev/null; then + echo "imposm process is running." + exit 0 +else + echo "imposm process is not running." 
1>&2 + exit 1 +fi diff --git a/images/tiler-imposm/start.sh b/images/tiler-imposm/start.sh index cf2d7922..dfffdc0c 100755 --- a/images/tiler-imposm/start.sh +++ b/images/tiler-imposm/start.sh @@ -1,175 +1,174 @@ #!/bin/bash -set -e -stateFile="state.txt" -PBFFile="osm.pbf" -limitFile="limitFile.geojson" +set -ex # directories to keep the imposm's cache for updating the db -workDir=/mnt/data -cachedir=$workDir/cachedir -mkdir -p $cachedir -diffdir=$workDir/diff -mkdir -p $diffdir -imposm3_expire_dir=$workDir/imposm3_expire_dir -mkdir -p $imposm3_expire_dir -# imposm3_expire_state_dir=$workDir/imposm3_expire_state -# mkdir -p $imposm3_expire_state_dir -# Setting directory -settingDir=/osm +WORKDIR=/mnt/data +CACHE_DIR=$WORKDIR/cachedir +DIFF_DIR=$WORKDIR/diff +IMPOSM3_EXPIRE_DIR=$WORKDIR/imposm3_expire_dir + +PBFFILE="${WORKDIR}/osm.pbf" +STATEFILE="state.txt" +LIMITFILE="limitFile.geojson" +# # Setting directory +# settingDir=/osm # Folder to store the imposm expider files in s3 or gs BUCKET_IMPOSM_FOLDER=imposm INIT_FILE=/mnt/data/init_done -# Create config file to set variable for imposm -echo "{" > $workDir/config.json -echo "\"cachedir\": \"$cachedir\"," >> $workDir/config.json -echo "\"diffdir\": \"$diffdir\"," >> $workDir/config.json -echo "\"connection\": \"postgis://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST/$POSTGRES_DB\"," >> $workDir/config.json -echo "\"mapping\": \"config/imposm3.json\"," >> $workDir/config.json -echo "\"replication_url\": \"$REPLICATION_URL\"" >> $workDir/config.json -echo "}" >> $workDir/config.json - -function getData () { - # Import from pubic url, usualy it come from osm - if [ $TILER_IMPORT_FROM == "osm" ]; then - wget $TILER_IMPORT_PBF_URL -O $PBFFile - fi - if [ $TILER_IMPORT_FROM == "osmseed" ]; then - if [ $CLOUDPROVIDER == "aws" ]; then - # Get the state.txt file from S3 - aws s3 cp $AWS_S3_BUCKET/planet/full-history/$stateFile . 
- PBFCloudPath=$(tail -n +1 $stateFile) - aws s3 cp $PBFCloudPath $PBFFile +mkdir -p "$CACHE_DIR" "$DIFF_DIR" "$IMPOSM3_EXPIRE_DIR" + +# Create config file to set variables for imposm +{ + echo "{" + echo "\"cachedir\": \"$CACHE_DIR\"," + echo "\"diffdir\": \"$DIFF_DIR\"," + echo "\"connection\": \"postgis://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST/$POSTGRES_DB\"," + echo "\"mapping\": \"config/imposm3.json\"," + echo "\"replication_url\": \"$REPLICATION_URL\"" + echo "}" +} >"$WORKDIR/config.json" + +function getData() { + ### Get the PBF file from the cloud provider or public URL + if [ "$TILER_IMPORT_FROM" == "osm" ]; then + if [ ! -f "$PBFFILE" ]; then + echo "$PBFFILE does not exist, downloading..." + wget "$TILER_IMPORT_PBF_URL" -O "$PBFFILE" fi - # Google storage - if [ $CLOUDPROVIDER == "gcp" ]; then - # Get the state.txt file from GS - gsutil cp $GCP_STORAGE_BUCKET/planet/full-history/$stateFile . - PBFCloudPath=$(tail -n +1 $stateFile) - gsutil cp $PBFCloudPath $PBFFile + fi +} + +getFormattedDate() { + local file_path="$1" + if command -v stat >/dev/null 2>&1; then + local modification_date=$(stat -c %Y "$file_path") + if [ $? -eq 0 ]; then + local formatted_date=$(date -d "@$modification_date" "+%Y-%m-%d:%H:%M:%S") + echo "Created/Updated date of $file_path: $formatted_date" + else + echo "Error: Unable to get file modification date for file ${file_path}" fi + else + echo "Error: 'stat' command not found. Unable to get file modification date, for file ${file_path}" fi } -function uploadExpiredFiles(){ - # create statte file - # dateStr=$(date '+%y%m%d%H%M%S') - # stateFile=$imposm3_expire_state_dir/expired_${dateStr}.txt - # bucketStateFile=${stateFile#*"$workDir"} - - for file in $(find $imposm3_expire_dir -type f -cmin -1); do - bucketFile=${file#*"$workDir"} - echo $(date +%F_%H:%M:%S)": New file..." 
$file - # echo $file >> $stateFile +function uploadExpiredFiles() { + # Upload the expired files to the cloud provider + for file in $(find "$IMPOSM3_EXPIRE_DIR" -type f -cmin -1); do + bucketFile=${file#*"$WORKDIR"} + getFormattedDate "$file" + # UPLOAD_EXPIRED_FILES=true to upload the expired to cloud provider + if [ "$UPLOAD_EXPIRED_FILES" == "true" ]; then + echo "Uploading expired file ${file} to ${AWS_S3_BUCKET}" + # AWS if [ "$CLOUDPROVIDER" == "aws" ]; then - aws s3 cp $file ${AWS_S3_BUCKET}/${BUCKET_IMPOSM_FOLDER}${bucketFile} --acl public-read + aws s3 cp "$file" "${AWS_S3_BUCKET}/${BUCKET_IMPOSM_FOLDER}${bucketFile}" --acl public-read fi + # Google Storage if [ "$CLOUDPROVIDER" == "gcp" ]; then - gsutil cp -a public-read $file ${GCP_STORAGE_BUCKET}${BUCKET_IMPOSM_FOLDER}${bucketFile} + gsutil cp -a public-read "$file" "${GCP_STORAGE_BUCKET}${BUCKET_IMPOSM_FOLDER}${bucketFile}" fi - done - # Upload state File - # if [[ -f "$stateFile" ]]; then - # # AWS - # if [ "$CLOUDPROVIDER" == "aws" ]; then - # aws s3 cp $stateFile ${AWS_S3_BUCKET}/${BUCKET_IMPOSM_FOLDER}${bucketStateFile} --acl public-read - # fi - # # Google Storage - # if [ "$CLOUDPROVIDER" == "gcp" ]; then - # gsutil cp -a public-read $stateFile ${GCP_STORAGE_BUCKET}${BUCKET_IMPOSM_FOLDER}${bucketStateFile} - # fi - # fi + else + echo "Expired files were not uploaded because UPLOAD_EXPIRED_FILES=${UPLOAD_EXPIRED_FILES}" + fi + done } -function updateData(){ +function updateData() { + ### Update the DB with the new data form minute replication if [ "$OVERWRITE_STATE" = "true" ]; then - rm $diffdir/last.state.txt + rm $DIFF_DIR/last.state.txt fi - # Verify if last.state.txt exist - if [ -f "$diffdir/last.state.txt" ]; then - echo "Exist... $diffdir/last.state.txt" - else - # OverWrite the last.state.txt file with REPLICATION_URL and sequenceNumber=0 + + # Check if last.state.txt exists + if [ -f "$DIFF_DIR/last.state.txt" ]; then + echo "Exist... 
$DIFF_DIR/last.state.txt" + else + # Create last.state.txt file with REPLICATION_URL and SEQUENCE_NUMBER from env vars echo "timestamp=0001-01-01T00\:00\:00Z sequenceNumber=$SEQUENCE_NUMBER - replicationUrl=$REPLICATION_URL" > $diffdir/last.state.txt + replicationUrl=$REPLICATION_URL" >$DIFF_DIR/last.state.txt fi + # Check if the limit file exists if [ -z "$TILER_IMPORT_LIMIT" ]; then - imposm run -config $workDir/config.json -expiretiles-dir $imposm3_expire_dir & - while true - do - echo "Updating...$(date +%F_%H-%M-%S)" - uploadExpiredFiles - sleep 1m - done + imposm run -config "$WORKDIR/config.json" -expiretiles-dir "$IMPOSM3_EXPIRE_DIR" & else - imposm run -config $workDir/config.json -limitto $workDir/$limitFile -expiretiles-dir $imposm3_expire_dir & - while true - do - echo "Updating...$(date +%F_%H-%M-%S)" - uploadExpiredFiles - sleep 1m - done + imposm run -config "$WORKDIR/config.json" -limitto "$WORKDIR/$LIMITFILE" -expiretiles-dir "$IMPOSM3_EXPIRE_DIR" & fi + + while true; do + echo "Upload expired files... $(date +%F_%H-%M-%S)" + uploadExpiredFiles + sleep 1m + done } -function importData () { +function importData() { + ### Import the PBF and Natural Earth files to the DB echo "Execute the missing functions" psql "postgresql://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST/$POSTGRES_DB" -a -f config/postgis_helpers.sql - echo "Import Natural Earth" - ./scripts/natural_earth.sh - echo "Import OSM Land" - ./scripts/osm_land.sh - echo "Import PBF file" + + if [ "$IMPORT_NATURAL_EARTH" = "true" ]; then + echo "Importing Natural Earth..." + ./scripts/natural_earth.sh + fi + + if [ "$IMPORT_OSM_LAND" = "true" ]; then + echo "Import OSM Land..." + ./scripts/osm_land.sh + fi + + echo "Import PBF file..." 
if [ -z "$TILER_IMPORT_LIMIT" ]; then imposm import \ - -config $workDir/config.json \ - -read $PBFFile \ - -write \ - -diff -cachedir $cachedir -diffdir $diffdir + -config $WORKDIR/config.json \ + -read $PBFFILE \ + -write \ + -diff -cachedir $CACHE_DIR -overwritecache -diffdir $DIFF_DIR else - wget $TILER_IMPORT_LIMIT -O $workDir/$limitFile + wget $TILER_IMPORT_LIMIT -O $WORKDIR/$LIMITFILE imposm import \ - -config $workDir/config.json \ - -read $PBFFile \ - -write \ - -diff -cachedir $cachedir -diffdir $diffdir \ - -limitto $workDir/$limitFile + -config $WORKDIR/config.json \ + -read $PBFFILE \ + -write \ + -diff -cachedir $CACHE_DIR -overwritecache -diffdir $DIFF_DIR \ + -limitto $WORKDIR/$LIMITFILE fi imposm import \ - -config $workDir/config.json \ - -deployproduction - # -diff -cachedir $cachedir -diffdir $diffdir + -config $WORKDIR/config.json \ + -deployproduction # These index will help speed up tegola tile generation psql "postgresql://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST/$POSTGRES_DB" -a -f config/postgis_index.sql touch $INIT_FILE + # Update the DB updateData } - echo "Connecting to $POSTGRES_HOST DB" flag=true while "$flag" = true; do pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue - # Change flag to false to stop ping the DB - flag=false - echo "Check if $INIT_FILE exists" - if ([[ -f $INIT_FILE ]]); then - echo "Update the DB with osm data" - updateData - else - echo "Import PBF data to DB" - getData - if [ -f $PBFFile ]; then - echo "Start importing the data" - importData - fi + # Change flag to false to stop ping the DB + flag=false + echo "Check if $INIT_FILE exists" + if ([[ -f $INIT_FILE ]]); then + echo "Update the DB with osm data" + updateData + else + echo "Import PBF data to DB" + getData + if [ -f $PBFFILE ]; then + echo "Start importing the data" + importData fi + fi done diff --git a/images/tiler-server/Dockerfile b/images/tiler-server/Dockerfile index 10df7d19..64255bff 100644 --- 
a/images/tiler-server/Dockerfile +++ b/images/tiler-server/Dockerfile @@ -10,7 +10,7 @@ RUN apk add musl-dev=1.1.24-r3 \ RUN mkdir -p /go/src/github.com/go-spatial/tegola RUN git clone https://github.com/go-spatial/tegola.git /go/src/github.com/go-spatial/tegola -RUN cd /go/src/github.com/go-spatial/tegola && git checkout v0.8.1 +RUN cd /go/src/github.com/go-spatial/tegola && git checkout v0.13.0 RUN cd /go/src/github.com/go-spatial/tegola/cmd/tegola \ && go build -gcflags "-N -l" -o /opt/tegola \ && chmod a+x /opt/tegola @@ -36,7 +36,7 @@ RUN pip3 install awscli RUN curl -sSL https://sdk.cloud.google.com | bash RUN ln -f -s /root/google-cloud-sdk/bin/gsutil /usr/bin/gsutil -RUN pip install mercantile \ +RUN pip3 install mercantile \ && apk del build-deps \ && rm -rf /var/cache/apk/* \ && apk --purge -v del py-pip diff --git a/images/tiler-server/expire-watcher.sh b/images/tiler-server/expire-watcher.sh index ef866b13..e0613fb1 100755 --- a/images/tiler-server/expire-watcher.sh +++ b/images/tiler-server/expire-watcher.sh @@ -10,6 +10,6 @@ while true ; do if [ "$sum" != "$new_sum" ]; then ./seed-by-diffs.sh else - sleep 1 + sleep 30s fi done \ No newline at end of file diff --git a/images/tiler-server/seed-by-diffs.sh b/images/tiler-server/seed-by-diffs.sh index 7963b6f1..0116429e 100755 --- a/images/tiler-server/seed-by-diffs.sh +++ b/images/tiler-server/seed-by-diffs.sh @@ -16,49 +16,32 @@ completed_jobs=$workDir/imposm/completed.list completed_dir=$workDir/imposm/imposm3_expire_purged mkdir -p $completed_dir - # List files in expire_dir -imp_list=`find $expire_dir -name '*.tiles' -type f` +imp_list=$(find $expire_dir -name '*.tiles' -type f) for f in $imp_list; do - echo "$f" >> $queued_jobs + echo "$f" >>$queued_jobs done # Sort the files and set unique rows -if [ -f $queued_jobs ] ; then - sort -u $queued_jobs > $workDir/imposm/tmp.list && mv $workDir/imposm/tmp.list $queued_jobs +if [ -f $queued_jobs ]; then + sort -u $queued_jobs >$workDir/imposm/tmp.list && 
mv $workDir/imposm/tmp.list $queued_jobs fi for f in $imp_list; do - echo "seeding from $f" - # Read each line on the tiles file - while IFS= read -r tile - do - bounds="$(python tile2bounds.py $tile)" - echo tegola cache purge \ - --config=/opt/tegola_config/config.toml \ - --min-zoom=0 --max-zoom=20 \ - --bounds=$bounds \ - tile-name=$tile - - tegola cache purge \ + # echo "Purge tiles from...$f" + tegola cache purge tile-list $f \ --config=/opt/tegola_config/config.toml \ - --min-zoom=0 --max-zoom=20 \ - --bounds=$bounds \ - tile-name=$tile - err=$? - if [[ $err != "0" ]]; then - #error - echo "tegola exited with error code $err" - # rm $queued_jobs - exit - fi - done < "$f" - echo "$f" >> $completed_jobs + --format="/zxy" \ + --min-zoom=0 \ + --max-zoom=20 \ + --overwrite=true & + sleep 10s + echo "$f" >>$completed_jobs mv $f $completed_dir done -if [ -f $queued_jobs ] ; then - echo "finished seeding" +if [ -f $queued_jobs ]; then + # Completed queued jobs rm $queued_jobs fi diff --git a/images/web/Dockerfile b/images/web/Dockerfile index ce843aca..f880e6fe 100644 --- a/images/web/Dockerfile +++ b/images/web/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 ENV DEBIAN_FRONTEND=noninteractive ENV workdir /var/www @@ -6,29 +6,37 @@ ENV workdir /var/www ENV RAILS_ENV=production # Install the openstreetmap-website dependencies -RUN apt-get update \ +RUN apt-get update \ && apt-get install -y \ - ruby2.7 libruby2.7 ruby2.7-dev libmagickwand-dev libxml2-dev libxslt1-dev \ - nodejs npm apache2 apache2-dev build-essential git-core firefox-geckodriver postgresql-client \ - libpq-dev libsasl2-dev imagemagick libffi-dev libgd-dev libarchive-dev libbz2-dev yarnpkg curl \ + ruby ruby-dev ruby-bundler libmagickwand-dev libxml2-dev libxslt1-dev \ + apache2 apache2-dev build-essential git-core postgresql-client \ + libpq-dev libsasl2-dev imagemagick libffi-dev libgd-dev libarchive-dev libbz2-dev curl \ + default-jre-headless file gpg-agent libvips-dev locales 
software-properties-common tzdata unzip \ + advancecomp gifsicle libjpeg-progs jhead jpegoptim optipng pngcrush pngquant libyaml-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Install openstreetmap-cgimap requiriments -RUN apt-get update \ - && apt-get -y install \ - libxml2-dev libpqxx-dev libfcgi-dev zlib1g-dev \ - libboost-dev libboost-program-options-dev libboost-filesystem-dev \ - libboost-system-dev libboost-locale-dev libmemcached-dev \ - libcrypto++-dev libargon2-dev libyajl-dev automake autoconf libtool \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - +## Install node +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - +RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - +RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list +RUN apt-get update && apt-get install -y nodejs yarn && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Install openstreetmap-cgimap requirements +RUN apt-get update && apt-get -y install libpqxx-dev libfcgi-dev zlib1g-dev \ + libboost-dev libboost-program-options-dev libfmt-dev \ + libmemcached-dev libcrypto++-dev libargon2-dev libyajl-dev \ + automake autoconf libtool && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + # Install cgimap ENV cgimap /tmp/openstreetmap-cgimap +ENV CGIMAP_GITSHA=5cd3d21bebe9d205828608be4c65bbda8b464308 RUN git clone -b master https://github.com/zerebubuth/openstreetmap-cgimap.git $cgimap -# openstreetmap-cgimap version at Jun 21, 2021 -RUN cd $cgimap && git checkout v0.8.6 +RUN cd $cgimap && git checkout $CGIMAP_GITSHA RUN cd $cgimap && \ ./autogen.sh && \ ./configure && \ @@ -40,12 +48,12 @@ RUN npm install -g svgo # Install openstreetmap-website RUN rm -rf $workdir/html -# GITSHA value at 15-02-2022 -ENV OPENSTREETMAP_WEBSITE_GITSHA=c24b5481812aba9e83da1fd855ccb37f92c5d75e + +## Sep 2023 +ENV OPENSTREETMAP_WEBSITE_GITSHA=d23763d6cdbf5ec11f0e83f8e6e8fb32ed973e6a RUN curl -L 
https://github.com/openstreetmap/openstreetmap-website/archive/$OPENSTREETMAP_WEBSITE_GITSHA.zip --output website.zip && unzip website.zip RUN mv openstreetmap-website-$OPENSTREETMAP_WEBSITE_GITSHA/* $workdir/ WORKDIR $workdir -RUN echo "gem 'image_optim_pack', :git => 'https://github.com/toy/image_optim_pack.git'" >> Gemfile # Install Ruby packages RUN gem install bundler && bundle install @@ -56,23 +64,26 @@ RUN touch $workdir/config/settings.local.yml RUN cp $workdir/config/example.storage.yml $workdir/config/storage.yml RUN echo "#session key \n\ production: \n\ - secret_key_base: $(bundle exec rake secret)" > $workdir/config/secrets.yml + secret_key_base: $(rails secret)" > $workdir/config/secrets.yml # Protect sensitive information RUN chmod 600 $workdir/config/database.yml $workdir/config/secrets.yml -RUN bundle exec rake yarn:install -RUN bundle exec rake i18n:js:export -RUN bundle exec rake assets:precompile +RUN bundle exec bin/yarn install + +RUN rails i18n:js:export assets:precompile # The rack interface requires a `tmp` directory to use openstreetmap-cgimap RUN ln -s /tmp /var/www/tmp # Add Apache configuration file ADD config/production.conf /etc/apache2/sites-available/production.conf +RUN a2enmod headers +RUN a2enmod setenvif RUN a2dissite 000-default RUN a2ensite production # Install Passenger + Apache module -RUN apt-get update && apt-get install -y libapache2-mod-passenger +RUN apt-key adv --refresh-keys --keyserver keyserver.ubuntu.com +RUN apt-get update && apt-get install -y libapache2-mod-passenger lighttpd # Enable the Passenger Apache module and restart Apache RUN echo "ServerName $(cat /etc/hostname)" >> /etc/apache2/apache2.conf @@ -83,23 +94,18 @@ RUN /usr/bin/passenger-config validate-install RUN /usr/sbin/passenger-memory-stats # Enable required apache modules for the cgimap Apache service -RUN a2enmod proxy proxy_http rewrite +RUN a2enmod proxy proxy_http rewrite lbmethod_byrequests proxy_fcgi # Config the virtual host apache2 -ADD 
config/cgimap.conf /tmp/ -RUN sed -e 's/RewriteRule ^(.*)/#RewriteRule ^(.*)/' \ - -e 's/\/var\/www/\/var\/www\/public/g' \ - /tmp/cgimap.conf > /etc/apache2/sites-available/cgimap.conf -RUN chmod 644 /etc/apache2/sites-available/cgimap.conf -RUN a2ensite cgimap RUN apache2ctl configtest # Set Permissions for www-data RUN chown -R www-data: $workdir # Add settings -ADD config/settings.yml $workdir/config/settings.yml +ADD config/settings.yml $workdir/config/ COPY start.sh $workdir/ +COPY liveness.sh $workdir/ CMD $workdir/start.sh diff --git a/images/web/config/cgimap.conf b/images/web/config/cgimap.conf deleted file mode 100644 index 059cab88..00000000 --- a/images/web/config/cgimap.conf +++ /dev/null @@ -1,43 +0,0 @@ - - -# An Apache configuration file to direct requests to cgimap or to a rails port -# API server, depending on if cgimap can handle the request. It assumes cgimap -# is running on port 8000 and that /var/www is the DocumentRoot -# Requires mod_rewrite, mod_proxy and mod_proxy_http - -# Adjust as needed for the DocumentRoot. You will need to create the api -# directory, but nothing need be in it. - - # This section could be placed in a .htaccess file in /var/www/public, but - # the ProxyPreserveHost directive can't. If you don't have access to the - # apache config you won't be able to get OAuth to work - - # Enable rewrites - RewriteEngine on - - # Rewrite calls which there is only a GET version and all request methods - # can go to cgimap. - RewriteRule ^0\.6/map$ - [L,H=fcgi:127.0.0.1:8000] - RewriteRule ^0\.6/(nodes|ways|relations)$ - [L,H=fcgi:127.0.0.1:8000] - RewriteRule ^0\.6/(way|relation)/([^/]+)/full$ - [L,H=fcgi:127.0.0.1:8000] - - # Cgimap has experimental support for these calls. Uncomment to use them - # RewriteRule ^0\.6/node/([^/]+)/ways$ - [L,H=fcgi:127.0.0.1:8000] - - # Calls where we only want to direct the GET version to cgimap, not all - # versions. 
Also note that this won't capture node/#/history which aren't - # handled by cgimap. - RewriteCond %{REQUEST_METHOD} ^(GET|HEAD)$ - RewriteRule ^0\.6/(node|way|relation)/([^/]+)$ - [L,H=fcgi:127.0.0.1:8000] - - # Fall-through to the main API server running the rails port to take calls - # not handled by cgimap. - RewriteRule ^(.*) http://api.openstreetmap.org/api/$1 [P] - - -# Required for OAuth to work. The Host header is used to generate the OAuth -# signature so we can't modify it. - -# This relies on api.openstreetmap.org accepting requests with a Host header -# that doesn't match a domain actually pointed to it. -ProxyPreserveHost On diff --git a/images/web/config/production.conf b/images/web/config/production.conf index 8b9f3c2b..89f73d8f 100644 --- a/images/web/config/production.conf +++ b/images/web/config/production.conf @@ -3,14 +3,40 @@ # Tell Apache and Passenger where your app's 'public' directory is DocumentRoot /var/www/public PassengerRuby /usr/bin/ruby - RewriteEngine On RewriteCond %{HTTP:X-Forwarded-Proto} =http - # RewriteRule .* https://%{HTTP:Host}%{REQUEST_URI} [L,R=permanent] + # Development mode in case domain is localhost + # ======Redirect to HTTPS + RewriteCond %{HTTP_HOST} !=localhost + RewriteCond %{HTTP_HOST} !=127.0.0.1 + RewriteCond %{HTTPS} off + RewriteRule .* https://%{HTTP_HOST}%{REQUEST_URI} [L,R=301] + + # ======Redirect to wwww osmseed.org + # RewriteCond %{HTTP_HOST} =osmseed.org + # RewriteCond %{HTTP_HOST} !^www\. 
[NC] + # RewriteRule .* https://www.%{HTTP_HOST}%{REQUEST_URI} [L,R=301] + + + CGIPassAuth On + + + # ======Proxying traffic to CGImap==== + RewriteCond %{REQUEST_URI} ^/api/0\.6/map + RewriteRule ^/api/0\.6/map(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteCond %{REQUEST_METHOD} ^(HEAD|GET)$ + RewriteRule ^/api/0\.6/(node|way|relation|changeset)/[0-9]+(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/(node|way|relation)/[0-9]+/history(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/(node|way|relation)/[0-9]+/relations(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/node/[0-9]+/ways(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/(way|relation)/[0-9]+/full(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/(nodes|ways|relations)(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] + RewriteRule ^/api/0\.6/changeset/[0-9]+/(upload|download)(\.json|\.xml)?$ fcgi://127.0.0.1:8000$0 [P] # Relax Apache security settings + AllowOverride None Allow from all Options -MultiViews - \ No newline at end of file + diff --git a/images/web/config/settings.yml b/images/web/config/settings.yml index 494bcb49..ce42d9e5 100644 --- a/images/web/config/settings.yml +++ b/images/web/config/settings.yml @@ -1,4 +1,3 @@ - # The server protocol and host server_protocol: "http" server_url: "openstreetmap.example.com" @@ -28,30 +27,54 @@ status: "online" max_request_area: 0.25 # Number of GPS trace/trackpoints returned per-page tracepoints_per_page: 5000 +# Default limit on the number of changesets returned by the changeset query api method +default_changeset_query_limit: 100 +# Maximum limit on the number of changesets returned by the changeset query api method +max_changeset_query_limit: 100 # Maximum number of nodes that will be returned by the api in a map request max_number_of_nodes: 50000 # Maximum number of nodes that can be in a way (checked on save) max_number_of_way_nodes: 2000 +# 
Maximum number of members that can be in a relation (checked on save) +max_number_of_relation_members: 32000 # The maximum area you're allowed to request notes from, in square degrees max_note_request_area: 25 +# Default limit on the number of notes returned by the note search api method +default_note_query_limit: 100 +# Maximum limit on the number of notes returned by the note search api method +max_note_query_limit: 10000 +# Maximum value of open issues counter for moderators, anything equal or greater to this value "n" is shown as "n+" +max_issues_count: 99 +# Maximum number of points in a GPX trace +max_trace_size: 1000000 # Zoom level to use for postcode results from the geocoder postcode_zoom: 15 -# Zoom level to use for geonames results from the geocoder -geonames_zoom: 12 # Timeout for API calls in seconds api_timeout: 300 # Timeout for web pages in seconds web_timeout: 30 # Periods (in hours) which are allowed for user blocks user_block_periods: [0, 1, 3, 6, 12, 24, 48, 96, 168, 336, 731, 4383, 8766, 87660] +# Account deletion cooldown period (in hours) since last changeset close; null to disable, 0 to make sure there aren't any open changesets when the deletion happens +user_account_deletion_delay: null # Rate limit for message sending max_messages_per_hour: 60 # Rate limit for friending max_friends_per_hour: 60 +# Rate limit for changeset comments +min_changeset_comments_per_hour: 1 +initial_changeset_comments_per_hour: 6 +max_changeset_comments_per_hour: 60 +moderator_changeset_comments_per_hour: 36000 +# Rate limit for changes +min_changes_per_hour: 100 +initial_changes_per_hour: 1000 +max_changes_per_hour: 100000 +days_to_max_changes: 7 +importer_changes_per_hour: 1000000 +moderator_changes_per_hour: 1000000 # Domain for handling message replies #messages_domain: "messages.openstreetmap.org" -# Geonames authentication details -#geonames_username: "" # MaxMind GeoIPv2 database #maxmind_database: "" # Users to show as being nearby @@ -64,9 +87,6 @@ 
spam_threshold: 50 diary_feed_delay: 0 # Default legale (jurisdiction location) for contributor terms default_legale: GB -# Location of GPX traces and images -gpx_trace_dir: "/home/osm/traces" -gpx_image_dir: "/home/osm/images" # Location of data for attachments attachments_dir: ":rails_root/public/attachments" # Log file to use @@ -75,44 +95,45 @@ attachments_dir: ":rails_root/public/attachments" #logstash_path: "" # List of memcache servers to use for caching #memcache_servers: [] +# Enable HTTP basic authentication support +basic_auth_support: true # Enable legacy OAuth 1.0 support oauth_10_support: true +oauth_10_registration: true # URL of Nominatim instance to use for geocoding nominatim_url: "https://nominatim.openstreetmap.org/" # Default editor default_editor: "id" # OAuth application for the web site -#oauth_application: "" -# OAuth consumer key for iD -#id_key: "" +oauth_application: "OAUTH_CLIENT_ID" +oauth_key: "OAUTH_KEY" +# OAuth application for iD +id_application: "" # Imagery to return in capabilities as blacklisted -imagery_blacklist: - # Current Google imagery URLs have google or googleapis in the domain - # with a vt or kh endpoint, and x, y and z query parameters - - ".*\\.google(apis)?\\..*/(vt|kh)[\\?/].*([xyz]=.*){3}.*" - # Blacklist VWorld - - "http://xdworld\\.vworld\\.kr:8080/.*" - # Blacklist here - - ".*\\.here\\.com[/:].*" +imagery_blacklist: [] # URL of Overpass instance to use for feature queries overpass_url: "https://overpass-api.de/api/interpreter" +overpass_credentials: false # Routing endpoints graphhopper_url: "https://graphhopper.com/api/1/route" fossgis_osrm_url: "https://routing.openstreetmap.de/" +fossgis_valhalla_url: "https://valhalla1.openstreetmap.de/route" # External authentication credentials #google_auth_id: "" #google_auth_secret: "" #google_openid_realm: "" #facebook_auth_id: "" #facebook_auth_secret: "" -#windowslive_auth_id: "" -#windowslive_auth_secret: "" #github_auth_id: "" #github_auth_secret: "" 
+#microsoft_auth_id: "" +#microsoft_auth_secret: "" #wikipedia_auth_id: "" #wikipedia_auth_secret: "" # Thunderforest authentication details #thunderforest_key: "" +# Tracestrack authentication details +#tracestrack_key: "" # Key for generating TOTP tokens #totp_key: "" # Enforce Content-Security-Policy @@ -134,7 +155,20 @@ trace_icon_storage: "local" smtp_address: "localhost" smtp_port: 25 smtp_domain: "localhost" -smtp_enable_starttls_auto: false +smtp_enable_starttls_auto: true +smtp_tls_verify_mode: "none" smtp_authentication: null smtp_user_name: null smtp_password: null +# Matomo settings for analytics +#matomo: +# Signup rate limits +#signup_ip_per_day: +#signup_ip_max_burst: +#signup_email_per_day: +#signup_email_max_burst: +# Private key for signing id_tokens +doorkeeper_signing_key: | + -----BEGIN PRIVATE KEY----- + PRIVATE_KEY + -----END PRIVATE KEY----- diff --git a/images/web/liveness.sh b/images/web/liveness.sh new file mode 100755 index 00000000..5d30dfd1 --- /dev/null +++ b/images/web/liveness.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# This is a script for the complex evaluation of whether Apache or other processes are running in the container. +if [ $(ps -ef | grep -E 'httpd|apache2' | grep -v grep | wc -l) -ge 1 ]; then + echo "Apache is running." + exit 0 +else + echo "Apache is not running!" 1>&2 + exit 1 +fi diff --git a/images/web/start.sh b/images/web/start.sh index 3d5246cb..5835fb75 100755 --- a/images/web/start.sh +++ b/images/web/start.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash workdir="/var/www" export RAILS_ENV=production -#### Because we can not set up many env variable in build process, we are going to process here! 
#### SETTING UP THE PRODUCTION DATABASE echo " # Production DB @@ -26,31 +25,62 @@ sed -i -e 's/smtp_user_name: null/smtp_user_name: "'$MAILER_USERNAME'"/g' $workd sed -i -e 's/smtp_password: null/smtp_password: "'$MAILER_PASSWORD'"/g' $workdir/config/settings.yml sed -i -e 's/openstreetmap@example.com/'$MAILER_FROM'/g' $workdir/config/settings.yml sed -i -e 's/smtp_port: 25/smtp_port: '$MAILER_PORT'/g' $workdir/config/settings.yml + +### SETTING UP UP OAUTH-2 ID KEY FOR iD +sed -i -e 's/id_application: ""/id_application: "'$OPENSTREETMAP_id_key'"/g' $workdir/config/settings.yml -#### SET UP ID KEY -sed -i -e 's/#id_key: ""/id_key: "'$OPENSTREETMAP_id_key'"/g' $workdir/config/settings.yml - -### SET UP OAUTH ID AND KEY +### SETTING UP OAUTH-2 ID KEY WEBSITE sed -i -e 's/OAUTH_CLIENT_ID/'$OAUTH_CLIENT_ID'/g' $workdir/config/settings.yml sed -i -e 's/OAUTH_KEY/'$OAUTH_KEY'/g' $workdir/config/settings.yml -## SET NOMINATIM URL +#### SETTING UP ENV VARS FOR MEMCACHED SERVER +sed -i -e 's/#memcache_servers: \[\]/memcache_servers: "'$OPENSTREETMAP_memcache_servers'"/g' $workdir/config/settings.yml + +### SETTING UP NOMINATIM URL sed -i -e 's/nominatim.openstreetmap.org/'$NOMINATIM_URL'/g' $workdir/config/settings.yml +#### SETTING UP OVERPASS URL +sed -i -e 's/overpass-api.de/'$OVERPASS_URL'/g' $workdir/config/settings.yml +sed -i -e 's/overpass-api.de/'$OVERPASS_URL'/g' $workdir/app/views/site/export.html.erb +sed -i -e 's/overpass-api.de/'$OVERPASS_URL'/g' $workdir/app/assets/javascripts/index/export.js + +### SETTING UP ORGANIZATION +sed -i -e 's/OpenStreetMap/'$ORGANIZATION_NAME'/g' $workdir/config/settings.yml +ORGANIZATION_NAME_LOWER=$(echo "$ORGANIZATION_NAME" | tr '[:upper:]' '[:lower:]') +sed -i -e 's/openstreetmap/'"$ORGANIZATION_NAME_LOWER"'/g' "$workdir/config/settings.yml" + +### ADDING DOORKEEPER_SIGNING_KEY +openssl genpkey -algorithm RSA -out private.pem +chmod 400 /var/www/private.pem +export DOORKEEPER_SIGNING_KEY=$(cat /var/www/private.pem | sed -e 
'1d;$d' | tr -d '\n') +sed -i "s#PRIVATE_KEY#${DOORKEEPER_SIGNING_KEY}#" $workdir/config/settings.yml + #### CHECK IF DB IS ALREADY UP AND START THE APP flag=true +site_loading=true + while "$flag" = true; do pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue flag=false - # Print the log while compiling the assets + until $(curl -sf -o /dev/null $SERVER_URL); do - echo "Waiting to start rails ports server..." + if [ "$site_loading" = true ]; then + echo "Waiting to start Rails ports server..." + site_loading=false + fi sleep 2 done & - - # bundle exec rake assets:precompile --trace + time rails i18n:js:export assets:precompile bundle exec rails db:migrate - # Start the delayed jobs queue worker and Start the app + /usr/local/bin/openstreetmap-cgimap \ + --port=8000 \ + --daemon \ + --instances=3 \ + --dbname=$POSTGRES_DB \ + --host=$POSTGRES_HOST \ + --username=$POSTGRES_USER \ + --password=$POSTGRES_PASSWORD \ + --logfile log/cgimap.log bundle exec rake jobs:work & apachectl -k start -DFOREGROUND done diff --git a/osm-seed/templates/db/db-backup-job.yaml b/osm-seed/templates/db/db-backup-job.yaml index 8cabd74a..bd1e1aec 100644 --- a/osm-seed/templates/db/db-backup-job.yaml +++ b/osm-seed/templates/db/db-backup-job.yaml @@ -1,15 +1,15 @@ -{{- if .Values.dbBackupRestore.enabled -}} +{{- range $i, $job := .Values.dbBackupRestore.cronjobs }} +{{- if $job.enabled -}} apiVersion: batch/v1 kind: CronJob metadata: - name: {{ .Release.Name }}-db-backup-job + name: {{ $.Release.Name }}-{{ $job.name }}-backup-job labels: - app: {{ template "osm-seed.name" . 
}} - component: db-backup-job - environment: {{ .Values.environment }} - release: {{ .Release.Name }} + component: {{ $job.name }}-job + environment: {{ $.Values.environment }} + release: {{ $.Release.Name }} spec: - schedule: {{ quote .Values.dbBackupRestore.schedule }} + schedule: {{ quote $job.schedule }} successfulJobsHistoryLimit: 2 failedJobsHistoryLimit: 2 startingDeadlineSeconds: 100 @@ -19,57 +19,141 @@ spec: template: spec: containers: - - name: {{ .Release.Name }}-db-backup-job - image: {{ .Values.dbBackupRestore.image.name }}:{{ .Values.dbBackupRestore.image.tag }} + - name: {{ $job.name }}-backup-job + image: {{ $.Values.dbBackupRestore.image.name }}:{{ $.Values.dbBackupRestore.image.tag }} command: ['/start.sh'] - {{- if .Values.dbBackupRestore.resources.enabled }} - resources: - requests: - memory: {{ .Values.dbBackupRestore.resources.requests.memory }} - cpu: {{ .Values.dbBackupRestore.resources.requests.cpu }} - limits: - memory: {{ .Values.dbBackupRestore.resources.limits.memory }} - cpu: {{ .Values.dbBackupRestore.resources.limits.cpu }} - {{- end }} env: + # Env vars for api-web database + {{ if eq $job.name "web-db" }} + # DB credentials - name: POSTGRES_HOST - value: {{ .Release.Name }}-db + value: {{ $.Release.Name }}-db - name: POSTGRES_DB - value: {{ .Values.db.env.POSTGRES_DB }} + value: {{ $.Values.db.env.POSTGRES_DB }} - name: POSTGRES_PASSWORD - value: {{ quote .Values.db.env.POSTGRES_PASSWORD }} + value: {{ quote $.Values.db.env.POSTGRES_PASSWORD }} - name: POSTGRES_USER - value: {{ .Values.db.env.POSTGRES_USER }} + value: {{ $.Values.db.env.POSTGRES_USER }} + # DB action: backup/restore - name: DB_ACTION - value: {{ .Values.dbBackupRestore.env.DB_ACTION }} + value: {{quote $job.env.DB_ACTION }} + # Backup naming + - name: SET_DATE_AT_NAME + value: {{quote $job.env.SET_DATE_AT_NAME }} + - name: BACKUP_CLOUD_FOLDER + value: {{ $job.env.BACKUP_CLOUD_FOLDER }} + - name: BACKUP_CLOUD_FILE + value: {{ $job.env.BACKUP_CLOUD_FILE }} + - name: 
AWS_S3_BUCKET + value: {{ $job.env.AWS_S3_BUCKET }} + # Clean up options + - name: CLEANUP_BACKUPS + value: {{ quote $job.env.CLEANUP_BACKUPS }} + - name: RETENTION_DAYS + value: {{ quote $job.env.RETENTION_DAYS }} + # In case DB_ACTION=restore, adn the backup url - name: RESTORE_URL_FILE - value: {{.Values.dbBackupRestore.env.RESTORE_URL_FILE}} - - name: CLEAN_BACKUPS - value: {{ quote .Values.dbBackupRestore.env.CLEAN_BACKUPS }} - - name: CLOUDPROVIDER - value: {{ .Values.cloudProvider }} - # In case cloudProvider=aws - {{- if eq .Values.cloudProvider "aws" }} + value: {{ $job.env.RESTORE_URL_FILE}} + {{- end }} + # Env vars for osmcha database + {{ if eq $job.name "tm-db" }} + # DB credentials + - name: POSTGRES_HOST + value: {{ $.Release.Name }}-tasking-manager-db + - name: POSTGRES_DB + value: {{ $.Values.tmDb.env.POSTGRES_DB }} + - name: POSTGRES_PASSWORD + value: {{ quote $.Values.tmDb.env.POSTGRES_PASSWORD }} + - name: POSTGRES_USER + value: {{ $.Values.tmDb.env.POSTGRES_USER }} + # DB action: backup/restore + - name: DB_ACTION + value: {{ quote $job.env.DB_ACTION }} + # Backup naming + - name: SET_DATE_AT_NAME + value: {{quote $job.env.SET_DATE_AT_NAME }} + - name: BACKUP_CLOUD_FOLDER + value: {{ $job.env.BACKUP_CLOUD_FOLDER }} + - name: BACKUP_CLOUD_FILE + value: {{ $job.env.BACKUP_CLOUD_FILE }} - name: AWS_S3_BUCKET - value: {{ .Values.dbBackupRestore.env.AWS_S3_BUCKET }} - {{- end }} - # In case cloudProvider=gcp - {{- if eq .Values.cloudProvider "gcp" }} + value: {{ $job.env.AWS_S3_BUCKET }} + # Clean up options + - name: CLEANUP_BACKUPS + value: {{ quote $job.env.CLEANUP_BACKUPS }} + - name: RETENTION_DAYS + value: {{ quote $job.env.RETENTION_DAYS }} + # In case DB_ACTION=restore, adn the backup url + - name: RESTORE_URL_FILE + value: {{ $job.env.RESTORE_URL_FILE}} + {{- end }} + # Env vars for osmcha database + {{ if eq $job.name "osmcha-db" }} + # DB credentials + - name: POSTGRES_HOST + value: {{ $.Release.Name }}-osmcha-db + - name: POSTGRES_DB + 
value: {{ $.Values.osmchaDb.env.POSTGRES_DB }} + - name: POSTGRES_PASSWORD + value: {{ quote $.Values.osmchaDb.env.POSTGRES_PASSWORD }} + - name: POSTGRES_USER + value: {{ $.Values.osmchaDb.env.POSTGRES_USER }} + # DB action: backup/restore + - name: DB_ACTION + value: {{ quote $job.env.DB_ACTION }} + # Backup naming + - name: SET_DATE_AT_NAME + value: {{quote $job.env.SET_DATE_AT_NAME }} + - name: BACKUP_CLOUD_FOLDER + value: {{ $job.env.BACKUP_CLOUD_FOLDER }} + - name: BACKUP_CLOUD_FILE + value: {{ $job.env.BACKUP_CLOUD_FILE }} + - name: AWS_S3_BUCKET + value: {{ $job.env.AWS_S3_BUCKET }} + # Clean up options + - name: CLEANUP_BACKUPS + value: {{ quote $job.env.CLEANUP_BACKUPS }} + - name: RETENTION_DAYS + value: {{ quote $job.env.RETENTION_DAYS }} + # In case DB_ACTION=restore, adn the backup url + - name: RESTORE_URL_FILE + value: {{ $job.env.RESTORE_URL_FILE}} + {{- end }} + # Cloud provider + - name: CLOUDPROVIDER + value: {{ $.Values.cloudProvider }} + # Credentials or configurations for each cloud provider + # {{- if eq $.Values.cloudProvider "aws" }} + # - name: AWS_S3_BUCKET + # value: {{ $job.env.AWS_S3_BUCKET }} + # {{- end }} + {{- if eq $.Values.cloudProvider "gcp" }} - name: GCP_STORAGE_BUCKET - value: {{ .Values.GCP_STORAGE_BUCKET }} - {{- end }} - # In case cloudProvider=azure - {{- if eq .Values.cloudProvider "azure" }} + value: {{ $job.env.GCP_STORAGE_BUCKET }} + {{- end }} + {{- if eq $.Values.cloudProvider "azure" }} - name: AZURE_STORAGE_ACCOUNT - value: {{ .Values.AZURE_STORAGE_ACCOUNT }} + value: {{ $.Values.AZURE_STORAGE_ACCOUNT }} - name: AZURE_CONTAINER_NAME - value: {{ .Values.AZURE_CONTAINER_NAME }} + value: {{ $job.AZURE_CONTAINER_NAME }} - name: AZURE_STORAGE_CONNECTION_STRING - value: {{ .Values.AZURE_STORAGE_CONNECTION_STRING }} - {{- end }} + value: {{ $.Values.AZURE_STORAGE_CONNECTION_STRING }} + {{- end }} + # Node selector + {{- if $job.resources.enabled }} + resources: + requests: + memory: {{ $job.resources.requests.memory }} 
+ cpu: {{ $job.resources.requests.cpu }} + limits: + memory: {{ $job.resources.limits.memory }} + cpu: {{ $job.resources.limits.cpu }} + {{- end }} restartPolicy: OnFailure - {{- if .Values.dbBackupRestore.nodeSelector.enabled }} + {{- if $job.nodeSelector.enabled }} nodeSelector: - {{ .Values.dbBackupRestore.nodeSelector.label_key }} : {{ .Values.dbBackupRestore.nodeSelector.label_value }} + {{ $job.nodeSelector.label_key }} : {{ $job.nodeSelector.label_value }} {{- end }} -{{- end }} \ No newline at end of file +--- +{{- end }} +{{- end }} diff --git a/osm-seed/templates/db/db-statefulset.yaml b/osm-seed/templates/db/db-statefulset.yaml index 4422f232..5320f969 100644 --- a/osm-seed/templates/db/db-statefulset.yaml +++ b/osm-seed/templates/db/db-statefulset.yaml @@ -9,7 +9,7 @@ metadata: environment: {{ .Values.environment }} release: {{ .Release.Name }} spec: - replicas: 1 # we only ever want one replica of the db + replicas: 1 selector: matchLabels: app: {{ template "osm-seed.name" . 
}} @@ -43,10 +43,14 @@ spec: value: {{ default "postgres" .Values.db.env.POSTGRES_USER | quote }} - name: PGUSER value: {{ default "postgres" .Values.db.env.POSTGRES_USER | quote }} + - name: ENVIRONMENT + value: {{ .Values.environment }} - name: PGDATA value: {{ .Values.db.persistenceDisk.mountPath }} - name: POD_IP valueFrom: { fieldRef: { fieldPath: status.podIP } } + - name: LOG_STATEMENT + value: {{ default "none" .Values.db.env.LOG_STATEMENT | quote }} livenessProbe: exec: command: @@ -69,6 +73,8 @@ spec: - name: postgres-storage mountPath: {{ .Values.db.persistenceDisk.mountPath }} subPath: {{ .Values.db.persistenceDisk.subPath }} + - name: shared-memory + mountPath: /dev/shm {{- if .Values.db.resources.enabled }} resources: requests: @@ -86,6 +92,10 @@ spec: {{- else }} emptyDir: {} {{- end }} + - name: shared-memory + emptyDir: + medium: Memory + sizeLimit: {{ .Values.db.sharedMemorySize }} {{- if .Values.db.nodeSelector.enabled }} nodeSelector: {{ .Values.db.nodeSelector.label_key }} : {{ .Values.db.nodeSelector.label_value }} diff --git a/osm-seed/templates/jobs/replication-job-deployment.yaml b/osm-seed/templates/jobs/replication-job-deployment.yaml index afc4ca75..fe2ea3a8 100644 --- a/osm-seed/templates/jobs/replication-job-deployment.yaml +++ b/osm-seed/templates/jobs/replication-job-deployment.yaml @@ -22,6 +22,16 @@ spec: - name: {{ .Release.Name }}-replication-job-deployment image: {{ .Values.replicationJob.image.name }}:{{ .Values.replicationJob.image.tag }} command: ['/start.sh'] + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /liveness.sh + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 10 + failureThreshold: 3 {{- if .Values.replicationJob.resources.enabled }} resources: requests: diff --git a/osm-seed/templates/letsencrypt-issuer.yaml b/osm-seed/templates/letsencrypt-issuer.yaml index 7f73166f..f9fa2aef 100644 --- a/osm-seed/templates/letsencrypt-issuer.yaml +++ b/osm-seed/templates/letsencrypt-issuer.yaml @@ -1,4 
+1,4 @@ -{{- if eq .Values.serviceType "ClusterIP" }} +{{- if and (eq .Values.serviceType "ClusterIP") (eq .Values.createClusterIssuer true) }} apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: diff --git a/osm-seed/templates/nominatim-api/nominatim-api-ingress.yaml b/osm-seed/templates/nominatim-api/nominatim-api-ingress.yaml index f6ca8f60..e31509cb 100644 --- a/osm-seed/templates/nominatim-api/nominatim-api-ingress.yaml +++ b/osm-seed/templates/nominatim-api/nominatim-api-ingress.yaml @@ -6,14 +6,19 @@ metadata: annotations: kubernetes.io/ingress.class: nginx cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m spec: + ingressClassName: nginx tls: - - hosts: - - nominatim.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-nominatim - + - hosts: + {{- if .Values.nominatimApi.ingressDomain }} + - {{ .Values.nominatimApi.ingressDomain }} + {{- else }} + - nominatim-api.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . 
}}-secret-nominatim rules: - - host: nominatim.{{ .Values.domain }} + - host: {{ if .Values.nominatimApi.ingressDomain }}{{ .Values.nominatimApi.ingressDomain }}{{ else }}nominatim-api.{{ .Values.domain }}{{ end }} http: paths: - path: / diff --git a/osm-seed/templates/osmcha-app/configMap.yaml b/osm-seed/templates/osmcha-app/configMap.yaml new file mode 100644 index 00000000..a3aa6ba3 --- /dev/null +++ b/osm-seed/templates/osmcha-app/configMap.yaml @@ -0,0 +1,126 @@ +{{- if .Values.osmchaApi.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-osmcha-common-env +data: + # DB + POSTGRES_HOST: {{ .Release.Name }}-osmcha-db + PGHOST: {{ .Release.Name }}-osmcha-db + POSTGRES_PORT: "5432" + POSTGRES_USER: {{ .Values.osmchaDb.env.POSTGRES_USER | quote }} + POSTGRES_PASSWORD: {{ .Values.osmchaDb.env.POSTGRES_PASSWORD | quote }} + POSTGRES_DATABASE: {{ .Values.osmchaDb.env.POSTGRES_DB | quote }} + POSTGRES_DB: {{ .Values.osmchaDb.env.POSTGRES_DB | quote }} + # API + DJANGO_SECRET_KEY: {{ .Values.osmchaApi.env.DJANGO_SECRET_KEY | quote }} + DJANGO_SETTINGS_MODULE: {{ .Values.osmchaApi.env.DJANGO_SETTINGS_MODULE | quote }} + OSMCHA_FRONTEND_VERSION: {{ .Values.osmchaApi.env.OSMCHA_FRONTEND_VERSION | quote }} + OAUTH_OSM_KEY: {{ .Values.osmchaApi.env.OAUTH_OSM_KEY | quote }} + OAUTH_OSM_SECRET: {{ .Values.osmchaApi.env.OAUTH_OSM_SECRET | quote }} + OAUTH_REDIRECT_URI: {{ .Values.osmchaApi.env.OAUTH_REDIRECT_URI | quote }} + DJANGO_SECURE_SSL_REDIRECT: "False" + DJANGO_DEBUG: "False" + DJANGO_ENV: "production" + OSM_SERVER_URL: {{ .Values.osmchaApi.env.OSM_SERVER_URL | quote }} + DJANGO_DEBUG: "False" + DJANGO_ENV: "production" + REDIS_URL: "redis://localhost:6379" + OSM_PLANET_BASE_URL: {{ .Values.osmchaApi.env.OSM_PLANET_BASE_URL | quote }} + # frontend + OSMCHA_URL: {{ .Values.osmchaApi.env.OSMCHA_URL | quote }} + OSMCHA_API_URL: {{ .Values.osmchaApi.env.OSMCHA_API_URL | quote }} + REACT_APP_OSM_URL: {{ .Values.osmchaApi.env.REACT_APP_OSM_URL 
| quote }} + REACT_APP_OSM_API: {{ .Values.osmchaApi.env.REACT_APP_OSM_API | quote }} + REACT_APP_OVERPASS_BASE: {{ .Values.osmchaApi.env.REACT_APP_OVERPASS_BASE | quote }} + REACT_APP_DISABLE_REAL_CHANGESETS: {{ .Values.osmchaApi.env.REACT_APP_DISABLE_REAL_CHANGESETS | quote }} + REACT_APP_MAPBOX_ACCESS_TOKEN: {{ .Values.osmchaApi.env.REACT_APP_MAPBOX_ACCESS_TOKEN | quote }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-osmcha-nginx-config +data: + nginx.conf: | + worker_processes 4; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + upstream app { + server localhost:5000; + } + + server { + listen 80; + charset utf-8; + + location ~ ^/static.* { + root /staticfiles; + try_files $uri @proxy_to_frontend; + } + + location @proxy_to_frontend { + root /assets; + try_files $uri $uri/ =404; + } + + location /api { + try_files $uri $uri/ @django; + } + + location /admin { + try_files $uri $uri/ @django; + } + + location @django { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $http_host; + proxy_redirect off; + proxy_pass http://app; + } + + location / { + root /assets; + # checks for static file, if not found proxy to app + try_files $uri $uri/ /index.html; + } + } + } + mime.types: | + types { + text/html html htm shtml; + text/css css; + text/xml xml; + image/gif gif; + image/jpeg jpeg jpg; + application/javascript js; + image/png png; + image/svg+xml svg svgz; + image/tiff tif tiff; + image/x-icon ico; + image/x-jng jng; + application/font-woff woff; + application/json json; + application/zip zip; + } + +{{- end }} diff --git 
a/osm-seed/templates/osmcha-app/cronJob.yaml b/osm-seed/templates/osmcha-app/cronJob.yaml new file mode 100644 index 00000000..acb11030 --- /dev/null +++ b/osm-seed/templates/osmcha-app/cronJob.yaml @@ -0,0 +1,74 @@ +{{- if .Values.osmchaApi.enabled -}} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ .Release.Name }}-osmcha-fetch-changesets-cronjob + labels: + app: {{ template "osm-seed.name" . }} + component: osmcha-app-deployment + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + schedule: "*/2 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 2 + failedJobsHistoryLimit: 2 + jobTemplate: + spec: + template: + spec: + containers: + - name: {{ .Release.Name }}-osmcha-fetch-changesets + image: "{{ .Values.osmchaApi.image.name }}:{{ .Values.osmchaApi.image.tag }}" + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - | + python manage.py fetchchangesets + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osmcha-common-env + restartPolicy: Never + {{- if .Values.osmchaApi.nodeSelector.enabled }} + nodeSelector: + {{ .Values.osmchaApi.nodeSelector.label_key }} : {{ .Values.osmchaApi.nodeSelector.label_value }} + {{- end }} + +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ .Release.Name }}-osmcha-process-changesets-cronjob + labels: + app: {{ template "osm-seed.name" . 
}} + component: osmcha-app-deployment + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + schedule: "0 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 2 + failedJobsHistoryLimit: 2 + jobTemplate: + spec: + template: + spec: + containers: + - name: {{ .Release.Name }}-osmcha-process-changesets + image: "{{ .Values.osmchaApi.image.name }}:{{ .Values.osmchaApi.image.tag }}" + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - | + python manage.py backfill_changesets $(date -d "yesterday" +%Y-%m-%d) $(date +%Y-%m-%d) + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osmcha-common-env + restartPolicy: Never + {{- if .Values.osmchaApi.nodeSelector.enabled }} + nodeSelector: + {{ .Values.osmchaApi.nodeSelector.label_key }} : {{ .Values.osmchaApi.nodeSelector.label_value }} + {{- end }} +{{- end }} diff --git a/osm-seed/templates/osmcha-app/deployment.yaml b/osm-seed/templates/osmcha-app/deployment.yaml new file mode 100644 index 00000000..43e2b981 --- /dev/null +++ b/osm-seed/templates/osmcha-app/deployment.yaml @@ -0,0 +1,124 @@ +{{- if .Values.osmchaApi.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-osmcha-app + labels: + app: {{ template "osm-seed.name" . }} + component: osmcha-app-deployment + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.osmchaApi.replicaCount }} + selector: + matchLabels: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-app + template: + metadata: + labels: + app: {{ template "osm-seed.name" . 
}} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-app + spec: + initContainers: + - name: web-builder + image: "{{ .Values.osmchaWeb.image.name }}:{{ .Values.osmchaWeb.image.tag }}" + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osmcha-common-env + volumeMounts: + - name: nginx-assets + mountPath: /assets + - name: init + image: "{{ .Values.osmchaApi.image.name }}:{{ .Values.osmchaApi.image.tag }}" + command: + - /bin/sh + - -c + - | + set -x + flag=true + while "$flag" = true; do + pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue + flag=false + python manage.py migrate + python manage.py collectstatic + mkdir -p /staticfiles/static + cp -r /app/staticfiles/* /staticfiles/static/ + done + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osmcha-common-env + volumeMounts: + - name: staticfiles + mountPath: /staticfiles + # securityContext: + # runAsUser: 0 + containers: + - name: api + # securityContext: + # runAsUser: 0 + image: "{{ .Values.osmchaApi.image.name }}:{{ .Values.osmchaApi.image.tag }}" + command: + - /bin/sh + - -c + - | + set -x + echo "start app...." 
+ gunicorn --workers 4 --bind 0.0.0.0:5000 --log-file - --access-logfile - config.wsgi + ports: + - containerPort: 5000 + livenessProbe: + httpGet: + path: / + port: 5000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + envFrom: + - configMapRef: + name: {{ .Release.Name }}-osmcha-common-env + volumeMounts: + - name: staticfiles + mountPath: /staticfiles + {{- if .Values.osmchaApi.resources.enabled }} + resources: + requests: + memory: {{ .Values.osmchaApi.resources.requests.memory }} + cpu: {{ .Values.osmchaApi.resources.requests.cpu }} + limits: + memory: {{ .Values.osmchaApi.resources.limits.memory }} + cpu: {{ .Values.osmchaApi.resources.limits.cpu }} + {{- end }} + + - name: nginx + image: "nginx:latest" + ports: + - containerPort: 80 + volumeMounts: + - name: osmcha-frontend-nginx-config + mountPath: /etc/nginx + readOnly: true + - name: staticfiles + mountPath: /staticfiles + - name: nginx-assets + mountPath: /assets + - name: redis + image: "redis:latest" + ports: + - containerPort: 6379 + volumes: + - name: staticfiles + emptyDir: {} + - name: nginx-assets + emptyDir: {} + - name: osmcha-frontend-nginx-config + configMap: + name: {{ .Release.Name }}-osmcha-nginx-config + defaultMode: 0777 + {{- if .Values.osmchaApi.nodeSelector.enabled }} + nodeSelector: + {{ .Values.osmchaApi.nodeSelector.label_key }} : {{ .Values.osmchaApi.nodeSelector.label_value }} + {{- end }} +{{- end }} + diff --git a/osm-seed/templates/osmcha-app/ingress.yaml b/osm-seed/templates/osmcha-app/ingress.yaml new file mode 100644 index 00000000..387f8448 --- /dev/null +++ b/osm-seed/templates/osmcha-app/ingress.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.osmchaApi.enabled (eq .Values.serviceType "ClusterIP") }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ template "osm-seed.fullname" . 
}}-ingress-osmcha-app + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m +spec: + ingressClassName: nginx + tls: + - hosts: + {{- if .Values.osmchaApi.ingressDomain }} + - {{ .Values.osmchaApi.ingressDomain }} + {{- else }} + - osmcha.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . }}-secret-osmcha + rules: + - host: {{ if .Values.osmchaApi.ingressDomain }}{{ .Values.osmchaApi.ingressDomain }}{{ else }}osmcha.{{ .Values.domain }}{{ end }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ template "osm-seed.fullname" . }}-osmcha-app + port: + number: 80 +{{- end }} diff --git a/osm-seed/templates/osmcha-app/service.yaml b/osm-seed/templates/osmcha-app/service.yaml new file mode 100644 index 00000000..985cf171 --- /dev/null +++ b/osm-seed/templates/osmcha-app/service.yaml @@ -0,0 +1,43 @@ +{{- if .Values.osmchaApi.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "osm-seed.fullname" . }}-osmcha-app + labels: + app: {{ template "osm-seed.name" . }} + component: osmcha-app-service + environment: {{ .Values.environment }} + release: {{ .Release.Name }} + annotations: + {{- if and (eq .Values.serviceType "LoadBalancer") .Values.AWS_SSL_ARN }} + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: {{ .Values.AWS_SSL_ARN }} + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https + {{- end }} + {{- if eq .Values.serviceType "ClusterIP" }} + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + {{- else }} + fake.annotation: fake + {{- end }} + {{- with .Values.osmchaApi.serviceAnnotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.serviceType }} + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: http + {{- if and (eq .Values.serviceType "LoadBalancer") .Values.AWS_SSL_ARN }} + - port: 443 + targetPort: 80 + protocol: TCP + name: https + {{- end }} + selector: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-app +{{- end }} diff --git a/osm-seed/templates/osmcha-db/osmcha-db-pd.yaml b/osm-seed/templates/osmcha-db/osmcha-db-pd.yaml new file mode 100644 index 00000000..e3ad67da --- /dev/null +++ b/osm-seed/templates/osmcha-db/osmcha-db-pd.yaml @@ -0,0 +1,98 @@ +{{- if .Values.osmchaDb.enabled -}} +{{- if .Values.osmchaDb.persistenceDisk.enabled -}} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ .Release.Name }}-osmcha-db-pv + labels: + app: {{ template "osm-seed.name" . }} + component: db-osmcha-pv + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + accessModes: + - ReadWriteOnce + ############## Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storageClassName: local-storage + capacity: + storage: {{ .Values.osmchaDb.persistenceDisk.localVolumeSize }} + hostPath: + path: {{ .Values.osmchaDb.persistenceDisk.localVolumeHostPath | quote }} + {{- end }} + + ############## AWS Provider ############## + {{- if eq .Values.cloudProvider "aws" }} + storageClassName: "" + capacity: + storage: {{ .Values.osmchaDb.persistenceDisk.AWS_ElasticBlockStore_size }} + awsElasticBlockStore: + volumeID: {{ .Values.osmchaDb.persistenceDisk.AWS_ElasticBlockStore_volumeID }} + fsType: ext4 + {{- end }} + + ############## GCP Provider ############## + {{- if eq .Values.cloudProvider "gcp" }} + storageClassName: "" + capacity: + storage: {{ .Values.osmchaDb.persistenceDisk.GCP_gcePersistentDisk_size }} + gcePersistentDisk: + pdName: {{ .Values.osmchaDb.persistenceDisk.GCP_gcePersistentDisk_pdName }} + fsType: ext4 + {{- end }} + + 
############## Azure Provider ############## + {{- if eq .Values.cloudProvider "azure" }} + storageClassName: "" + capacity: + storage: {{ .Values.osmchaDb.persistenceDisk.AZURE_diskSize }} + persistentVolumeReclaimPolicy: Retain + azureDisk: + kind: Managed + diskName: {{ .Values.osmchaDb.persistenceDisk.AZURE_diskName }} + diskURI: {{ .Values.osmchaDb.persistenceDisk.AZURE_diskURI }} + {{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Release.Name }}-osmcha-db-pv-claim + labels: + app: {{ template "osm-seed.name" . }} + component: db-osmcha-pvc + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + ############## Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storageClassName: local-storage + {{- else }} + ############## Other providers ############## + storageClassName: "" + {{- end }} + volumeName: {{ .Release.Name }}-osmcha-db-pv + accessModes: + - ReadWriteOnce + resources: + requests: + ############## Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storage: {{ .Values.osmchaDb.persistenceDisk.localVolumeSize }} + {{- end }} + + ############## AWS Provider ############## + {{- if eq .Values.cloudProvider "aws" }} + storage: {{ .Values.osmchaDb.persistenceDisk.AWS_ElasticBlockStore_size }} + {{- end }} + + ############## GCP Provider ############## + {{- if eq .Values.cloudProvider "gcp" }} + storage: {{ .Values.osmchaDb.persistenceDisk.GCP_gcePersistentDisk_size }} + {{- end }} + + ############## AZURE Provider ############## + {{- if eq .Values.cloudProvider "azure" }} + storage: {{ .Values.osmchaDb.persistenceDisk.AZURE_diskSize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/osm-seed/templates/osmcha-db/osmcha-db-service.yaml b/osm-seed/templates/osmcha-db/osmcha-db-service.yaml new file mode 100644 index 00000000..4ce6b3a7 --- /dev/null +++ b/osm-seed/templates/osmcha-db/osmcha-db-service.yaml @@ -0,0 +1,19 @@ +{{- if 
.Values.osmchaDb.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-osmcha-db + labels: + app: {{ template "osm-seed.name" . }} + component: osmcha-db-service + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + ports: + - port: 5432 + targetPort: 5432 + selector: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-db +{{- end }} \ No newline at end of file diff --git a/osm-seed/templates/osmcha-db/osmcha-db-statefulset.yaml b/osm-seed/templates/osmcha-db/osmcha-db-statefulset.yaml new file mode 100644 index 00000000..78d32dbd --- /dev/null +++ b/osm-seed/templates/osmcha-db/osmcha-db-statefulset.yaml @@ -0,0 +1,98 @@ +{{- if .Values.osmchaDb.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-osmcha-db + labels: + app: {{ template "osm-seed.name" . }} + component: db-tiler-statefulset + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-db + serviceName: {{ .Release.Name }}-osmcha-db + template: + metadata: + labels: + app: {{ template "osm-seed.name" . 
}} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-osmcha-db + spec: + # Remove lost+found in EBS disk + initContainers: + - name: remove-lost-found + image: busybox + command: ['sh', '-c', 'rm -rf /var/lib/postgresql/data/lost+found'] + volumeMounts: + - name: postgres-storage + mountPath: /var/lib/postgresql/data + containers: + - name: {{ .Chart.Name }}-osmcha-db + image: "{{ .Values.osmchaDb.image.name }}:{{ .Values.osmchaDb.image.tag }}" + ports: + - name: postgres + containerPort: 5432 + protocol: TCP + env: + - name: PGHOST + value: {{ .Release.Name }}-osmcha-db + - name: POSTGRES_DB + value: {{ .Values.osmchaDb.env.POSTGRES_DB }} + - name: POSTGRES_PASSWORD + value: {{ quote .Values.osmchaDb.env.POSTGRES_PASSWORD }} + - name: POSTGRES_USER + value: {{ default "postgres" .Values.osmchaDb.env.POSTGRES_USER | quote }} + - name: PGUSER + value: {{ default "postgres" .Values.osmchaDb.env.POSTGRES_USER | quote }} + - name: PGDATA + value: {{ .Values.osmchaDb.persistenceDisk.mountPath }} + - name: POD_IP + valueFrom: { fieldRef: { fieldPath: status.podIP } } + livenessProbe: + exec: + command: + - sh + - -c + - exec pg_isready --host $POD_IP + initialDelaySeconds: 120 + timeoutSeconds: 5 + failureThreshold: 6 + readinessProbe: + exec: + command: + - sh + - -c + - exec pg_isready --host $POD_IP + initialDelaySeconds: 5 + timeoutSeconds: 3 + periodSeconds: 5 + volumeMounts: + - name: postgres-storage + mountPath: {{ .Values.osmchaDb.persistenceDisk.mountPath }} + {{- if .Values.osmchaDb.resources.enabled }} + resources: + requests: + memory: {{ .Values.osmchaDb.resources.requests.memory }} + cpu: {{ .Values.osmchaDb.resources.requests.cpu }} + limits: + memory: {{ .Values.osmchaDb.resources.limits.memory }} + cpu: {{ .Values.osmchaDb.resources.limits.cpu }} + {{- end }} + volumes: + - name: postgres-storage + {{- if .Values.osmchaDb.persistenceDisk.enabled }} + persistentVolumeClaim: + claimName: {{ .Release.Name }}-osmcha-db-pv-claim + {{- else }} + 
emptyDir: {} + {{- end }} + {{- if .Values.osmchaDb.nodeSelector.enabled }} + nodeSelector: + {{ .Values.osmchaDb.nodeSelector.label_key }} : {{ .Values.osmchaDb.nodeSelector.label_value }} + {{- end }} +{{- end }} diff --git a/osm-seed/templates/overpass-api/overpass-api-ingress.yaml b/osm-seed/templates/overpass-api/overpass-api-ingress.yaml index 8ca0e8c1..24d963ff 100644 --- a/osm-seed/templates/overpass-api/overpass-api-ingress.yaml +++ b/osm-seed/templates/overpass-api/overpass-api-ingress.yaml @@ -6,14 +6,19 @@ metadata: annotations: kubernetes.io/ingress.class: nginx cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m spec: + ingressClassName: nginx tls: - - hosts: - - overpass.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-overpass - + - hosts: + {{- if .Values.overpassApi.ingressDomain }} + - {{ .Values.overpassApi.ingressDomain }} + {{- else }} + - overpass-api.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . }}-secret-overpass rules: - - host: overpass.{{ .Values.domain }} + - host: {{ if .Values.overpassApi.ingressDomain }}{{ .Values.overpassApi.ingressDomain }}{{ else }}overpass-api.{{ .Values.domain }}{{ end }} http: paths: - path: / diff --git a/osm-seed/templates/overpass-api/overpass-api-statefulset.yaml b/osm-seed/templates/overpass-api/overpass-api-statefulset.yaml index 0f5902e1..3b0e6af0 100644 --- a/osm-seed/templates/overpass-api/overpass-api-statefulset.yaml +++ b/osm-seed/templates/overpass-api/overpass-api-statefulset.yaml @@ -30,12 +30,12 @@ spec: - name: http containerPort: 80 protocol: TCP - # livenessProbe: - # httpGet: - # path: / - # port: 80 - # initialDelaySeconds: 600 # 10 min, because the compile process takes time. 
- # timeoutSeconds: 30 + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 14400 # Normal update takes 2 hours aprrox, afther that start checking liveness + timeoutSeconds: 30 {{- if .Values.overpassApi.resources.enabled }} resources: requests: @@ -46,7 +46,6 @@ spec: cpu: {{ .Values.overpassApi.resources.limits.cpu }} {{- end }} env: - # overpass-api env variables - name: OVERPASS_META value: {{ .Values.overpassApi.env.OVERPASS_META | quote}} - name: OVERPASS_MODE diff --git a/osm-seed/templates/taginfo/taginfo-configMap.yaml b/osm-seed/templates/taginfo/taginfo-configMap.yaml new file mode 100644 index 00000000..c7b76bdc --- /dev/null +++ b/osm-seed/templates/taginfo/taginfo-configMap.yaml @@ -0,0 +1,19 @@ +{{- if .Values.taginfo.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-taginfo-common-env +data: + URL_PLANET_FILE_STATE: {{ .Values.taginfo.env.URL_PLANET_FILE_STATE | quote }} + URL_PLANET_FILE: {{ .Values.taginfo.env.URL_PLANET_FILE | quote }} + URL_HISTORY_PLANET_FILE_STATE: {{ .Values.taginfo.env.URL_HISTORY_PLANET_FILE_STATE | quote }} + URL_HISTORY_PLANET_FILE: {{ .Values.taginfo.env.URL_HISTORY_PLANET_FILE | quote }} + OVERWRITE_CONFIG_URL: {{ .Values.taginfo.env.OVERWRITE_CONFIG_URL | quote }} + TAGINFO_PROJECT_REPO: {{ .Values.taginfo.env.TAGINFO_PROJECT_REPO | quote }} + DOWNLOAD_DB: {{ .Values.taginfo.env.DOWNLOAD_DB | quote }} + CREATE_DB: {{ .Values.taginfo.env.CREATE_DB | quote }} + TIME_UPDATE_INTERVAL: {{ .Values.taginfo.env.TIME_UPDATE_INTERVAL | quote }} + AWS_S3_BUCKET: {{ .Values.taginfo.env.AWS_S3_BUCKET | quote }} + ENVIRONMENT: {{ .Values.taginfo.env.ENVIRONMENT | quote }} + INTERVAL_DOWNLOAD_DATA: {{ .Values.taginfo.env.INTERVAL_DOWNLOAD_DATA | quote}} +{{- end }} diff --git a/osm-seed/templates/taginfo/taginfo-cronJob.yaml b/osm-seed/templates/taginfo/taginfo-cronJob.yaml new file mode 100644 index 00000000..6fb5bc38 --- /dev/null +++ 
b/osm-seed/templates/taginfo/taginfo-cronJob.yaml @@ -0,0 +1,40 @@ +{{- if .Values.taginfo.enabled -}} +{{- if .Values.taginfo.cronjob.enabled -}} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ .Release.Name }}-taginfo-cronjob + labels: + app: {{ template "osm-seed.name" . }} + component: taginfo-cronjob + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + schedule: {{ .Values.taginfo.cronjob.schedule | quote}} + concurrencyPolicy: Forbid + startingDeadlineSeconds: 10 + successfulJobsHistoryLimit: 2 + failedJobsHistoryLimit: 2 + jobTemplate: + spec: + template: + spec: + containers: + - name: {{ .Release.Name }}-taginfo-job + image: "{{ .Values.taginfo.image.name }}:{{ .Values.taginfo.image.tag }}" + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - | + /usr/src/app/start.sh data + envFrom: + - configMapRef: + name: {{ .Release.Name }}-taginfo-common-env + restartPolicy: Never + {{- if .Values.taginfo.cronjob.nodeSelector.enabled }} + nodeSelector: + {{ .Values.taginfo.cronjob.nodeSelector.label_key }} : {{ .Values.taginfo.cronjob.nodeSelector.label_value }} + {{- end }} +{{- end }} +{{- end }} diff --git a/osm-seed/templates/taginfo/taginfo-deployment.yaml b/osm-seed/templates/taginfo/taginfo-deployment.yaml index cb2a2bb0..03230cbb 100644 --- a/osm-seed/templates/taginfo/taginfo-deployment.yaml +++ b/osm-seed/templates/taginfo/taginfo-deployment.yaml @@ -2,29 +2,34 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ template "osm-seed.fullname" . }}-taginfo + name: {{ .Release.Name }}-taginfo-web labels: app: {{ template "osm-seed.name" . }} component: web-deployment environment: {{ .Values.environment }} release: {{ .Release.Name }} spec: - replicas: {{ .Values.taginfo.replicaCount }} + replicas: 1 selector: matchLabels: app: {{ template "osm-seed.name" . 
}} release: {{ .Release.Name }} - run: {{ .Release.Name }}-taginfo + run: {{ .Release.Name }}-taginfo-web template: metadata: labels: app: {{ template "osm-seed.name" . }} release: {{ .Release.Name }} - run: {{ .Release.Name }}-taginfo + run: {{ .Release.Name }}-taginfo-web spec: containers: - name: {{ .Chart.Name }}-taginfo image: "{{ .Values.taginfo.image.name }}:{{ .Values.taginfo.image.tag }}" + command: + - /bin/sh + - -c + - | + /usr/src/app/start.sh web ports: - name: http containerPort: 80 @@ -33,8 +38,8 @@ spec: httpGet: path: / port: 80 - initialDelaySeconds: 600 # 10 min, because the compile process takes time. - timeoutSeconds: 30 + initialDelaySeconds: 120 + timeoutSeconds: 60 {{- if .Values.taginfo.resources.enabled }} resources: requests: @@ -44,25 +49,9 @@ spec: memory: {{ .Values.taginfo.resources.limits.memory }} cpu: {{ .Values.taginfo.resources.limits.cpu }} {{- end }} - env: - - name: URL_PLANET_FILE_STATE - value: {{ .Values.taginfo.env.URL_PLANET_FILE_STATE }} - - name: URL_PLANET_FILE - value: {{ .Values.taginfo.env.URL_PLANET_FILE }} - - name: URL_HISTORY_PLANET_FILE_STATE - value: {{ .Values.taginfo.env.URL_HISTORY_PLANET_FILE_STATE }} - - name: URL_HISTORY_PLANET_FILE - value: {{ .Values.taginfo.env.URL_HISTORY_PLANET_FILE }} - - name: OVERWRITE_CONFIG_URL - value: {{ .Values.taginfo.env.OVERWRITE_CONFIG_URL }} - - name: TAGINFO_PROJECT_REPO - value: {{ .Values.taginfo.env.TAGINFO_PROJECT_REPO }} - - name: DOWNLOAD_DB - value: {{ .Values.taginfo.env.DOWNLOAD_DB }} - - name: CREATE_DB - value: {{ .Values.taginfo.env.CREATE_DB }} - - name: TIME_UPDATE_INTERVAL - value: {{ .Values.taginfo.env.TIME_UPDATE_INTERVAL }} + envFrom: + - configMapRef: + name: {{ .Release.Name }}-taginfo-common-env {{- if .Values.taginfo.nodeSelector.enabled }} nodeSelector: {{ .Values.taginfo.nodeSelector.label_key }} : {{ .Values.taginfo.nodeSelector.label_value }} diff --git a/osm-seed/templates/taginfo/taginfo-ingress.yaml 
b/osm-seed/templates/taginfo/taginfo-ingress.yaml index 5db7bce5..c546d2c3 100644 --- a/osm-seed/templates/taginfo/taginfo-ingress.yaml +++ b/osm-seed/templates/taginfo/taginfo-ingress.yaml @@ -6,14 +6,19 @@ metadata: annotations: kubernetes.io/ingress.class: nginx cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m spec: + ingressClassName: nginx tls: - - hosts: - - taginfo.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-taginfo - + - hosts: + {{- if .Values.taginfo.ingressDomain }} + - {{ .Values.taginfo.ingressDomain }} + {{- else }} + - taginfo.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . }}-secret-taginfo rules: - - host: taginfo.{{ .Values.domain }} + - host: {{ if .Values.taginfo.ingressDomain }}{{ .Values.taginfo.ingressDomain }}{{ else }}taginfo.{{ .Values.domain }}{{ end }} http: paths: - path: / @@ -23,4 +28,4 @@ spec: name: {{ template "osm-seed.fullname" . }}-taginfo port: number: 80 -{{- end }} \ No newline at end of file +{{- end }} diff --git a/osm-seed/templates/taginfo/taginfo-service.yaml b/osm-seed/templates/taginfo/taginfo-service.yaml index e6d2f3c4..1a6e08bb 100644 --- a/osm-seed/templates/taginfo/taginfo-service.yaml +++ b/osm-seed/templates/taginfo/taginfo-service.yaml @@ -39,5 +39,5 @@ spec: selector: app: {{ template "osm-seed.name" . 
}} release: {{ .Release.Name }} - run: {{ .Release.Name }}-taginfo + run: {{ .Release.Name }}-taginfo-web {{- end }} \ No newline at end of file diff --git a/osm-seed/templates/tasking-manager-api/tasking-manager-api-ingress.yaml b/osm-seed/templates/tasking-manager-api/tasking-manager-api-ingress.yaml deleted file mode 100644 index 45771c75..00000000 --- a/osm-seed/templates/tasking-manager-api/tasking-manager-api-ingress.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if and .Values.tmApi.enabled (eq .Values.serviceType "ClusterIP") }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ template "osm-seed.fullname" . }}-ingress-tm-api - annotations: - kubernetes.io/ingress.class: nginx - cert-manager.io/cluster-issuer: letsencrypt-prod-issuer -spec: - tls: - - hosts: - - tm-api.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-tm-api - - rules: - - host: tm-api.{{ .Values.domain }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ template "osm-seed.fullname" . }}-tm-api - port: - number: 80 -{{- end }} \ No newline at end of file diff --git a/osm-seed/templates/tasking-manager-api/tasking-manager-api-deployment.yaml b/osm-seed/templates/tasking-manager-api/tm-deployment.yaml similarity index 89% rename from osm-seed/templates/tasking-manager-api/tasking-manager-api-deployment.yaml rename to osm-seed/templates/tasking-manager-api/tm-deployment.yaml index 997cb503..f6f6306e 100644 --- a/osm-seed/templates/tasking-manager-api/tasking-manager-api-deployment.yaml +++ b/osm-seed/templates/tasking-manager-api/tm-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ template "osm-seed.fullname" . }}-tasking-manager-api + name: {{ .Release.Name }}-tasking-manager-api labels: app: {{ template "osm-seed.name" . 
}} component: tasking-manager-api-deployment @@ -46,17 +46,17 @@ spec: {{- end }} env: - name: POSTGRES_HOST - value: {{ .Values.tmApi.env.POSTGRES_HOST }} + value: {{ .Release.Name }}-tasking-manager-db - name: POSTGRES_DB - value: {{ .Values.tmApi.env.POSTGRES_DB }} + value: {{ .Values.tmDb.env.POSTGRES_DB }} - name: POSTGRES_PASSWORD - value: {{ quote .Values.tmApi.env.POSTGRES_PASSWORD }} + value: {{ quote .Values.tmDb.env.POSTGRES_PASSWORD }} - name: POSTGRES_USER - value: {{ .Values.tmApi.env.POSTGRES_USER }} + value: {{ .Values.tmDb.env.POSTGRES_USER }} - name: POSTGRES_PORT - value: {{ .Values.tmApi.env.POSTGRES_PORT | quote }} + value: "5432" - name: TM_DB - value: {{ printf "postgresql://%s:%s@%s/%s" .Values.tmApi.env.POSTGRES_USER .Values.tmApi.env.POSTGRES_PASSWORD .Values.tmApi.env.POSTGRES_HOST .Values.tmApi.env.POSTGRES_DB }} + value: {{ printf "postgresql://%s:%s@%s-tasking-manager-db/%s" .Values.tmDb.env.POSTGRES_USER .Values.tmDb.env.POSTGRES_PASSWORD .Release.Name .Values.tmDb.env.POSTGRES_DB }} - name: TM_ORG_NAME value: {{ .Values.tmApi.env.TM_ORG_NAME }} - name: TM_ORG_CODE diff --git a/osm-seed/templates/tasking-manager-api/tm-ingress.yaml b/osm-seed/templates/tasking-manager-api/tm-ingress.yaml new file mode 100644 index 00000000..d8dda37c --- /dev/null +++ b/osm-seed/templates/tasking-manager-api/tm-ingress.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.tmApi.enabled (eq .Values.serviceType "ClusterIP") }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ template "osm-seed.fullname" . }}-ingress-tm-api + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m +spec: + ingressClassName: nginx + tls: + - hosts: + {{- if .Values.tmApi.ingressDomain }} + - {{ .Values.tmApi.ingressDomain }} + {{- else }} + - tm-api.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . 
}}-secret-tm-api + rules: + - host: {{ if .Values.tmApi.ingressDomain }}{{ .Values.tmApi.ingressDomain }}{{ else }}tm-api.{{ .Values.domain }}{{ end }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ .Release.Name }}-tasking-manager-api + port: + number: 80 +{{- end }} \ No newline at end of file diff --git a/osm-seed/templates/tasking-manager-api/tasking-manager-api-service.yaml b/osm-seed/templates/tasking-manager-api/tm-service.yaml similarity index 95% rename from osm-seed/templates/tasking-manager-api/tasking-manager-api-service.yaml rename to osm-seed/templates/tasking-manager-api/tm-service.yaml index 3f6afa18..33d13b23 100644 --- a/osm-seed/templates/tasking-manager-api/tasking-manager-api-service.yaml +++ b/osm-seed/templates/tasking-manager-api/tm-service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "osm-seed.fullname" . }}-tasking-manager-api + name: {{ .Release.Name }}-tasking-manager-api labels: app: {{ template "osm-seed.name" . }} component: tasking-manager-api-service diff --git a/osm-seed/templates/tasking-manager-db/tm-db-pd.yaml b/osm-seed/templates/tasking-manager-db/tm-db-pd.yaml new file mode 100644 index 00000000..01bcf870 --- /dev/null +++ b/osm-seed/templates/tasking-manager-db/tm-db-pd.yaml @@ -0,0 +1,85 @@ +{{- if .Values.tmDb.enabled -}} +{{- if .Values.tmDb.persistenceDisk.enabled -}} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ .Release.Name }}-tasking-manager-db-pv + labels: + app: {{ template "osm-seed.name" . 
}} + component: tm-db-pd + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + ############## Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storageClassName: local-storage + capacity: + storage: {{ .Values.tmDb.persistenceDisk.localVolumeSize }} + accessModes: + - ReadWriteOnce + hostPath: + path: {{ .Values.tmDb.persistenceDisk.localVolumeHostPath | quote }} + {{- end }} + + ############## AWS Provider ############## + {{- if eq .Values.cloudProvider "aws" }} + storageClassName: "" + capacity: + storage: {{ .Values.tmDb.persistenceDisk.AWS_ElasticBlockStore_size }} + accessModes: + - ReadWriteOnce + awsElasticBlockStore: + volumeID: {{ .Values.tmDb.persistenceDisk.AWS_ElasticBlockStore_volumeID }} + fsType: ext4 + {{- end }} + + ############## GCP Provider ############## + {{- if eq .Values.cloudProvider "gcp" }} + storageClassName: "" + capacity: + storage: {{ .Values.tmDb.persistenceDisk.GCP_gcePersistentDisk_size }} + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: {{ .Values.tmDb.persistenceDisk.GCP_gcePersistentDisk_pdName }} + fsType: ext4 + {{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Release.Name }}-tasking-manager-db-pv-claim + labels: + app: {{ template "osm-seed.name" . 
}} + component: db-pd + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + ############## Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storageClassName: local-storage + {{- else }} + ############## Other providers ############## + storageClassName: "" + {{- end }} + volumeName: {{ .Release.Name }}-tasking-manager-db-pv + accessModes: + - ReadWriteOnce + resources: + requests: + ############# Minikube ############## + {{- if eq .Values.cloudProvider "minikube" }} + storage: {{ .Values.tmDb.persistenceDisk.localVolumeSize }} + {{- end }} + + ############## AWS Provider ############## + {{- if eq .Values.cloudProvider "aws" }} + storage: {{ .Values.tmDb.persistenceDisk.AWS_ElasticBlockStore_size }} + {{- end }} + + ############## GCP Provider ############## + {{- if eq .Values.cloudProvider "gcp" }} + storage: {{ .Values.tmDb.persistenceDisk.GCP_gcePersistentDisk_size }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/osm-seed/templates/tasking-manager-db/tm-db-service.yaml b/osm-seed/templates/tasking-manager-db/tm-db-service.yaml new file mode 100644 index 00000000..d9717687 --- /dev/null +++ b/osm-seed/templates/tasking-manager-db/tm-db-service.yaml @@ -0,0 +1,19 @@ +{{- if .Values.tmDb.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-tasking-manager-db + labels: + app: {{ template "osm-seed.name" . }} + component: tm-db-service + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + ports: + - port: 5432 + targetPort: 5432 + selector: + app: {{ template "osm-seed.name" . 
}} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-tasking-manager-db +{{- end }} \ No newline at end of file diff --git a/osm-seed/templates/tasking-manager-db/tm-db-statefulset.yaml b/osm-seed/templates/tasking-manager-db/tm-db-statefulset.yaml new file mode 100644 index 00000000..04011193 --- /dev/null +++ b/osm-seed/templates/tasking-manager-db/tm-db-statefulset.yaml @@ -0,0 +1,101 @@ +{{- if .Values.tmDb.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-tasking-manager-db + labels: + app: {{ template "osm-seed.name" . }} + component: db-tm-statefulset + environment: {{ .Values.environment }} + release: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-tasking-manager-db + serviceName: {{ .Release.Name }}-tasking-manager-db + template: + metadata: + labels: + app: {{ template "osm-seed.name" . }} + release: {{ .Release.Name }} + run: {{ .Release.Name }}-tasking-manager-db + spec: + containers: + - name: {{ .Chart.Name }}-tm-db + image: "{{ .Values.tmDb.image.name }}:{{ .Values.tmDb.image.tag }}" + ports: + - name: postgres + containerPort: 5432 + protocol: TCP + env: + - name: POSTGRES_HOST + value: {{ .Release.Name }}-tasking-manager-db + - name: POSTGRES_DB + value: {{ .Values.tmDb.env.POSTGRES_DB }} + - name: POSTGRES_PASSWORD + value: {{ quote .Values.tmDb.env.POSTGRES_PASSWORD }} + - name: POSTGRES_USER + value: {{ default "postgres" .Values.tmDb.env.POSTGRES_USER | quote }} + - name: PGUSER + value: {{ default "postgres" .Values.tmDb.env.POSTGRES_USER | quote }} + - name: PGDATA + value: {{ .Values.tmDb.persistenceDisk.mountPath }} + - name: POSTGRES_DB_MAX_CONNECTIONS + value: {{ default 100 .Values.tmDb.env.POSTGRES_DB_MAX_CONNECTIONS | quote }} + - name: POSTGRES_DB_SHARED_BUFFERS + value: {{ default "128MB" .Values.tmDb.env.POSTGRES_DB_SHARED_BUFFERS | quote }} + - name: 
POSTGRES_DB_WORK_MEM + value: {{ default "4MB" .Values.tmDb.env.POSTGRES_DB_WORK_MEM | quote }} + - name: POSTGRES_DB_MAINTENANCE_WORK_MEM + value: {{ default "64MB" .Values.tmDb.env.POSTGRES_DB_MAINTENANCE_WORK_MEM | quote }} + - name: POSTGRES_DB_EFFECTIVE_CACHE_SIZE + value: {{ default "4GB" .Values.tmDb.env.POSTGRES_DB_EFFECTIVE_CACHE_SIZE | quote }} + - name: POD_IP + valueFrom: { fieldRef: { fieldPath: status.podIP } } + livenessProbe: + exec: + command: + - sh + - -c + - exec pg_isready --host $POD_IP + initialDelaySeconds: 120 + timeoutSeconds: 5 + failureThreshold: 6 + readinessProbe: + exec: + command: + - sh + - -c + - exec pg_isready --host $POD_IP + initialDelaySeconds: 5 + timeoutSeconds: 3 + periodSeconds: 5 + volumeMounts: + - name: postgres-storage + mountPath: {{ .Values.tmDb.persistenceDisk.mountPath }} + subPath: {{ .Values.tmDb.persistenceDisk.subPath }} + {{- if .Values.tmDb.resources.enabled }} + resources: + requests: + memory: {{ .Values.tmDb.resources.requests.memory }} + cpu: {{ .Values.tmDb.resources.requests.cpu }} + limits: + memory: {{ .Values.tmDb.resources.limits.memory }} + cpu: {{ .Values.tmDb.resources.limits.cpu }} + {{- end }} + volumes: + - name: postgres-storage + {{- if .Values.tmDb.persistenceDisk.enabled }} + persistentVolumeClaim: + claimName: {{ .Release.Name }}-tasking-manager-db-pv-claim + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tmDb.nodeSelector.enabled }} + nodeSelector: + {{ .Values.tmDb.nodeSelector.label_key }} : {{ .Values.tmDb.nodeSelector.label_value }} + {{- end }} +{{- end }} diff --git a/osm-seed/templates/tiler-imposm/tiler-imposm-statefulset.yaml b/osm-seed/templates/tiler-imposm/tiler-imposm-statefulset.yaml index f25ff266..a5834332 100644 --- a/osm-seed/templates/tiler-imposm/tiler-imposm-statefulset.yaml +++ b/osm-seed/templates/tiler-imposm/tiler-imposm-statefulset.yaml @@ -1,5 +1,4 @@ {{- if .Values.tilerImposm.enabled -}} -{{- if .Values.tilerImposm.persistenceDisk.enabled -}} 
apiVersion: apps/v1 kind: StatefulSet metadata: @@ -24,6 +23,16 @@ spec: - name: {{ .Release.Name }}-tiler-imposm-statefulset image: {{ .Values.tilerImposm.image.name }}:{{ .Values.tilerImposm.image.tag }} command: ['./start.sh'] + livenessProbe: + exec: + command: + - /bin/bash + - -c + - ./liveness.sh + initialDelaySeconds: 7200 + timeoutSeconds: 5 + periodSeconds: 10 + failureThreshold: 3 {{- if .Values.tilerImposm.resources.enabled }} resources: requests: @@ -37,23 +46,29 @@ spec: - name: POSTGRES_HOST value: {{ .Release.Name }}-tiler-db - name: POSTGRES_DB - value: {{ .Values.tilerDb.env.POSTGRES_DB }} + value: {{ .Values.tilerDb.env.POSTGRES_DB | quote }} - name: POSTGRES_PASSWORD - value: {{ quote .Values.tilerDb.env.POSTGRES_PASSWORD }} + value: {{ .Values.tilerDb.env.POSTGRES_PASSWORD | quote }} - name: POSTGRES_USER - value: {{ .Values.tilerDb.env.POSTGRES_USER }} + value: {{ .Values.tilerDb.env.POSTGRES_USER | quote }} - name: POSTGRES_PORT value: {{ .Values.tilerDb.env.POSTGRES_PORT | quote }} - name: TILER_IMPORT_FROM - value: {{ .Values.tilerImposm.env.TILER_IMPORT_FROM }} + value: {{ .Values.tilerImposm.env.TILER_IMPORT_FROM | quote }} - name: TILER_IMPORT_PBF_URL - value: {{ .Values.tilerImposm.env.TILER_IMPORT_PBF_URL }} + value: {{ .Values.tilerImposm.env.TILER_IMPORT_PBF_URL | quote }} - name: REPLICATION_URL - value: {{ .Values.tilerImposm.env.REPLICATION_URL }} + value: {{ .Values.tilerImposm.env.REPLICATION_URL | quote }} - name: SEQUENCE_NUMBER - value: {{ .Values.tilerImposm.env.SEQUENCE_NUMBER | quote}} + value: {{ .Values.tilerImposm.env.SEQUENCE_NUMBER | quote }} - name: OVERWRITE_STATE - value: {{ quote .Values.tilerImposm.env.OVERWRITE_STATE }} + value: {{ .Values.tilerImposm.env.OVERWRITE_STATE | quote }} + - name: UPLOAD_EXPIRED_FILES + value: {{ .Values.tilerImposm.env.UPLOAD_EXPIRED_FILES | default "true" | quote }} + - name: IMPORT_NATURAL_EARTH + value: {{ .Values.tilerImposm.env.IMPORT_NATURAL_EARTH | default "true" | quote }} + 
- name: IMPORT_OSM_LAND + value: {{ .Values.tilerImposm.env.IMPORT_OSM_LAND | default "true" | quote }} - name: CLOUDPROVIDER value: {{ .Values.cloudProvider }} # In case cloudProvider=aws @@ -83,4 +98,3 @@ spec: {{ .Values.tilerImposm.nodeSelector.label_key }} : {{ .Values.tilerImposm.nodeSelector.label_value }} {{- end }} {{- end }} -{{- end }} diff --git a/osm-seed/templates/tiler-server/tiler-server-ingress.yaml b/osm-seed/templates/tiler-server/tiler-server-ingress.yaml index d89042f0..7e468713 100644 --- a/osm-seed/templates/tiler-server/tiler-server-ingress.yaml +++ b/osm-seed/templates/tiler-server/tiler-server-ingress.yaml @@ -6,14 +6,19 @@ metadata: annotations: kubernetes.io/ingress.class: nginx cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 5m spec: + ingressClassName: nginx tls: - - hosts: - - tiler-server.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-tiler-server - + - hosts: + {{- if .Values.tilerServer.ingressDomain }} + - {{ .Values.tilerServer.ingressDomain }} + {{- else }} + - tiler.{{ .Values.domain }} + {{- end }} + secretName: {{ template "osm-seed.fullname" . 
}}-secret-tiler-server rules: - - host: tiler-server.{{ .Values.domain }} + - host: {{ if .Values.tilerServer.ingressDomain }}{{ .Values.tilerServer.ingressDomain }}{{ else }}tiler.{{ .Values.domain }}{{ end }} http: paths: - path: / diff --git a/osm-seed/templates/web/web-deployment.yaml b/osm-seed/templates/web/web-deployment.yaml index 7f07f511..22f0cf98 100644 --- a/osm-seed/templates/web/web-deployment.yaml +++ b/osm-seed/templates/web/web-deployment.yaml @@ -30,11 +30,23 @@ spec: containerPort: 80 protocol: TCP livenessProbe: + {{- if .Values.web.livenessProbeExec }} + exec: + command: + - /bin/bash + - -c + - ./liveness.sh + initialDelaySeconds: 600 + timeoutSeconds: 5 + periodSeconds: 10 + failureThreshold: 3 + {{- else }} httpGet: path: /api/0.6/capabilities port: 80 initialDelaySeconds: 600 # 10 min, because the compile process takes time. timeoutSeconds: 30 + {{- end }} {{- if .Values.web.resources.enabled }} resources: requests: @@ -78,7 +90,11 @@ spec: # Set SERVER_URL in case we use ingress {{- if eq .Values.serviceType "ClusterIP" }} - name: SERVER_URL - value: web.{{ .Values.domain }} + {{- if .Values.web.ingressDomain }} + value: {{ .Values.web.ingressDomain }} + {{- else }} + value: {{ .Values.domain }} + {{- end }} - name: SERVER_PROTOCOL value : "https" {{- end }} @@ -107,6 +123,26 @@ spec: value: {{ .Values.web.env.NEW_RELIC_LICENSE_KEY | default ""}} - name: NEW_RELIC_APP_NAME value: {{ .Values.web.env.NEW_RELIC_APP_NAME | default ""}} + - name: ORGANIZATION_NAME + value: {{ .Values.web.env.ORGANIZATION_NAME | default "OpenStreetMap"}} + - name: WEBSITE_STATUS + value: {{ .Values.web.env.WEBSITE_STATUS | default "online"}} + - name: API_TIMEOUT + value: {{ .Values.web.env.API_TIMEOUT | default "30" | quote }} + - name: WEB_TIMEOUT + value: {{ .Values.web.env.WEB_TIMEOUT | default "30" | quote }} + # This parameter is specifically for when you want to restore a backup to the db; + # you don't need to set it in values.yaml if you don't use it + - 
name: BACKUP_FILE_URL + value: {{ .Values.web.env.BACKUP_FILE_URL | default "none" | quote }} + volumeMounts: + - mountPath: /dev/shm + name: shared-memory + volumes: + - emptyDir: + medium: Memory + sizeLimit: {{ .Values.web.sharedMemorySize }} + name: shared-memory {{- if .Values.web.nodeSelector.enabled }} nodeSelector: {{ .Values.web.nodeSelector.label_key }} : {{ .Values.web.nodeSelector.label_value }} diff --git a/osm-seed/templates/web/web-ingress.yaml b/osm-seed/templates/web/web-ingress.yaml index 4df537c9..44bebd3f 100644 --- a/osm-seed/templates/web/web-ingress.yaml +++ b/osm-seed/templates/web/web-ingress.yaml @@ -2,18 +2,25 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ template "osm-seed.fullname" . }}-ingress-web + name: {{ template "osm-seed.fullname" . }}-ingress annotations: kubernetes.io/ingress.class: nginx cert-manager.io/cluster-issuer: letsencrypt-prod-issuer + nginx.ingress.kubernetes.io/proxy-body-size: 100m spec: + ingressClassName: nginx tls: - - hosts: - - web.{{ .Values.domain }} - secretName: {{ template "osm-seed.fullname" . }}-secret-web - + - hosts: + {{- if .Values.web.ingressDomain }} + - {{ .Values.web.ingressDomain }} + {{- else }} + - web.{{ .Values.domain }} + {{- end }} + - api.{{ .Values.domain }} + - {{ .Values.domain }} + secretName: {{ template "osm-seed.fullname" . }}-tls-secret rules: - - host: web.{{ .Values.domain }} + - host: {{ if .Values.web.ingressDomain }}{{ .Values.web.ingressDomain }}{{ else }}web.{{ .Values.domain }}{{ end }} http: paths: - path: / @@ -23,4 +30,24 @@ spec: name: {{ template "osm-seed.fullname" . }}-web port: number: 80 -{{- end }} \ No newline at end of file + - host: api.{{ .Values.domain }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ template "osm-seed.fullname" . }}-web + port: + number: 80 + - host: {{ .Values.domain }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ template "osm-seed.fullname" . 
}}-web + port: + number: 80 +{{- end }} diff --git a/osm-seed/values.yaml b/osm-seed/values.yaml index 41196f1c..f7bb6374 100644 --- a/osm-seed/values.yaml +++ b/osm-seed/values.yaml @@ -7,31 +7,25 @@ osmSeedVersion: develop-9302179 environment: development # cloudProvider is provider where you are going to deploy osm-seed, it could be: aws, gcp, azure, minikube -cloudProvider: minikube - +cloudProvider: aws # ==================================================================================================== # AWS: In case you are using the cloudProvider=aws set the below variables, We are assuming the nodes has a policies access to S3 # ==================================================================================================== AWS_S3_BUCKET: s3://osm-seed - # ==================================================================================================== -# GCP: In case you are using the cloudProvider=gcp set the below variables, We are assuming the nodes has a policies to upload files to cloud storage +# GCP: In case you are using the cloudProvider=gcp set the below variables, We are assuming the nodes has a policies to upload files to cloud storage # ==================================================================================================== GCP_STORAGE_BUCKET: gs://osm-seed-test - # ==================================================================================================== # AZURE: In case you are using the cloudProvider=azure set the below variables # ==================================================================================================== AZURE_STORAGE_ACCOUNT: osmseed AZURE_CONTAINER_NAME: osm-seed AZURE_STORAGE_CONNECTION_STRING: xyz.. - # ==================================================== # AWS: Specify ARN for SSL certificate, currently assumes a single wildcard cert # ==================================================== - AWS_SSL_ARN: false - # Specify serviceType. 
# # serviceType can be one of three values: 'NodePort', 'ClusterIP' or 'LoadBalancer' @@ -44,10 +38,10 @@ AWS_SSL_ARN: false # If you specify `LoadBalancer` as the service type, if you also specify # an `AWS_SSL_ARN` that is a wildcart certificate, that will be configured # as the SSL certificate for your services. Else, you will need to configure -# SSL separately. -serviceType: NodePort - - +# SSL separately. +# serviceType: NodePort +serviceType: ClusterIP +createClusterIssuer: true # Domain that is pointed to the clusterIP # You will need to create an A record like *.osmseed.example.com pointed to the ClusterIP # Then, the cluster configuration will setup services at their respective subdomains: @@ -55,16 +49,14 @@ serviceType: NodePort # - overpass.osmseed.example.com # - nominatim.osmseed.example.com # - etc. -domain: osmseed.example.com - +domain: dev.osmseed.org # ==================================================================================================== # Configuration for Lets Encrypt setup # ==================================================================================================== # Admin Email address used when generating Lets Encrypt certificates. # You will be notified of expirations, etc. on this email address. 
-adminEmail: admin@example.com - +adminEmail: admin@osmseed.org # ==================================================================================================== # ==================================================================================================== # ==================================Pods Configurations=============================================== @@ -77,12 +69,14 @@ adminEmail: admin@example.com db: enabled: true image: - name: '' - tag: '' + name: "" + tag: "" env: POSTGRES_DB: openstreetmap POSTGRES_USER: postgres - POSTGRES_PASSWORD: '1234' + POSTGRES_PASSWORD: "1234" + ENVIRONMENT: "production" + LOG_STATEMENT: "all" persistenceDisk: enabled: false mountPath: /var/lib/postgresql/data @@ -103,43 +97,48 @@ db: resources: enabled: false requests: - memory: '1Gi' - cpu: '1' + memory: "1Gi" + cpu: "1" limits: - memory: '2Gi' - cpu: '1' + memory: "2Gi" + cpu: "1" nodeSelector: enabled: false - + sharedMemorySize: 64Mi # ==================================================================================================== # Variables for osm-seed website # ==================================================================================================== web: enabled: true image: - name: '' - tag: '' + name: "" + tag: "" replicaCount: 1 + ingressDomain: www.dev.osmseed.org serviceAnnotations: {} env: MAILER_ADDRESS: smtp.gmail.com MAILER_DOMAIN: gmail.com MAILER_USERNAME: example@gmail.com - MAILER_PASSWORD: '1234' + MAILER_PASSWORD: "1234" MAILER_FROM: no-reply@example.com MAILER_PORT: 25 - NOMINATIM_URL: 'nominatim' - OVERPASS_URL: 'overpassApi' - OAUTH_CLIENT_ID: 'abcd' # Create client ID and key as OAuth2 Application - OAUTH_KEY: 'xyz' + NOMINATIM_URL: "nominatim" + OVERPASS_URL: "overpassApi" + OAUTH_CLIENT_ID: "abcd" # Create client ID and key as OAuth2 Application + OAUTH_KEY: "xyz" + ORGANIZATION_NAME: "OSMSeed" + WEBSITE_STATUS: "online" + API_TIMEOUT: 30 + WEB_TIMEOUT: 30 resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + 
memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false autoscaling: @@ -147,12 +146,13 @@ web: minReplicas: 1 maxReplicas: 3 cpuUtilization: 80 - + sharedMemorySize: 256Mi + livenessProbeExec: true # ==================================================================================================== # Variables for memcached. Memcached is used to store session cookies # ==================================================================================================== memcached: - enabled: false + enabled: true nodeSelector: enabled: false label_key: nodegroup_type @@ -160,31 +160,29 @@ memcached: resources: enabled: false requests: - memory: '8Gi' - cpu: '2' + memory: "8Gi" + cpu: "2" limits: - memory: '8Gi' - cpu: '2' - + memory: "8Gi" + cpu: "2" # ==================================================================================================== # Variables for id-editor # ==================================================================================================== idEditor: enabled: false - # ==================================================================================================== # Variables for full-history container # ==================================================================================================== fullHistory: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" nodeSelector: enabled: false label_key: nodegroup-type label_value: ohm-non-tiler - schedule: '* * * * *' + schedule: "* * * * *" env: OVERWRITE_FHISTORY_FILE: false resources: @@ -195,122 +193,151 @@ fullHistory: limits: memory: "16Gi" cpu: "4" - -# ==================================================================================================== -# Variables for osm-seed database backup and restore -# ==================================================================================================== + # 
==================================================================================================== + # Variables for osm-seed database backup and restore + # ==================================================================================================== dbBackupRestore: - enabled: false image: - name: '' - tag: '' - schedule: '* * * * *' - env: - DB_ACTION: backup - resources: - enabled: false - requests: - memory: '300Mi' - cpu: '0.5' - limits: - memory: '400Mi' - cpu: '0.6' - nodeSelector: - enabled: false - + name: "developmentseed/osmseed-backup-restore" + tag: "0.1.0-0.dev.git.942.h994af82" + cronjobs: + - name: web-db + enabled: false + schedule: "* * * * *" + env: + # DB action: backup/restore + DB_ACTION: backup + # Naming backup files + SET_DATE_AT_NAME: true + BACKUP_CLOUD_FOLDER: database/web-db + BACKUP_CLOUD_FILE: osmseed-api-web-db + AWS_S3_BUCKET: secure-bucket + # Clean up backups options + CLEANUP_BACKUPS: true + RETENTION_DAYS: "30" + resources: + enabled: false + requests: + memory: "300Mi" + cpu: "0.5" + limits: + memory: "400Mi" + cpu: "0.6" + nodeSelector: + enabled: false + - name: osmcha-db + enabled: false + schedule: "* * * * *" + env: + # DB action: backup/restore + DB_ACTION: backup + # Naming backup files + SET_DATE_AT_NAME: true + BACKUP_CLOUD_FOLDER: database/osmcha-db + BACKUP_CLOUD_FILE: osmseed-osmcha-db + AWS_S3_BUCKET: secure-bucket + # Clean up backups options + CLEANUP_BACKUPS: true + RETENTION_DAYS: "30" + resources: + enabled: false + requests: + memory: "300Mi" + cpu: "0.5" + limits: + memory: "400Mi" + cpu: "0.6" + nodeSelector: + enabled: false # ==================================================================================================== # Variables for osm-seed for osmosis, this configuration os to get the planet dump files from apidb # ==================================================================================================== planetDump: enabled: false image: - name: '' - tag: '' - schedule: '* * 
* * *' + name: "" + tag: "" + schedule: "* * * * *" env: OVERWRITE_PLANET_FILE: false resources: enabled: false requests: - memory: '14Gi' - cpu: '4' + memory: "14Gi" + cpu: "4" limits: - memory: '16Gi' - cpu: '4' + memory: "16Gi" + cpu: "4" nodeSelector: enabled: false - # ==================================================================================================== # Variables for replication-job, Configuration to create the replication files by, minute, hour, or day # ==================================================================================================== replicationJob: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" resources: enabled: false requests: - memory: '20Gi' - cpu: '8' + memory: "20Gi" + cpu: "8" limits: - memory: '24Gi' - cpu: '10' + memory: "24Gi" + cpu: "10" nodeSelector: enabled: false - # ==================================================================================================== # Variables for osm-seed to pupulate the apidb # ==================================================================================================== populateApidb: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" env: - URL_FILE_TO_IMPORT: 'http://download.geofabrik.de/europe/monaco-latest.osm.pbf' + URL_FILE_TO_IMPORT: "http://download.geofabrik.de/europe/monaco-latest.osm.pbf" resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2.5' - + memory: "2Gi" + cpu: "2.5" # ==================================================================================================== # Variables to start a pod to process osm files # ==================================================================================================== osmProcessor: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" env: - URL_FILE_TO_PROCESS: 'http://download.geofabrik.de/europe/monaco-latest.osm.pbf' + URL_FILE_TO_PROCESS: 
"http://download.geofabrik.de/europe/monaco-latest.osm.pbf" OSM_FILE_ACTION: simple_pbf resources: enabled: false requests: - memory: '14Gi' - cpu: '4' + memory: "14Gi" + cpu: "4" limits: - memory: '16Gi' - cpu: '4' - + memory: "16Gi" + cpu: "4" # ==================================================================================================== # Variables for tiler-db # ==================================================================================================== tilerDb: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" env: POSTGRES_HOST: tiler-db POSTGRES_DB: tiler-osm POSTGRES_USER: postgres - POSTGRES_PASSWORD: '1234' + POSTGRES_PASSWORD: "1234" POSTGRES_PORT: 5432 POSTGRES_DB_MAX_CONNECTIONS: 100 POSTGRES_DB_SHARED_BUFFERS: 128MB @@ -334,28 +361,30 @@ tilerDb: resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2.5' + memory: "2Gi" + cpu: "2.5" nodeSelector: enabled: false label_key: nodegroup_type label_value: tiler - # ==================================================================================================== # Variables for tiler-imposm # ==================================================================================================== tilerImposm: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" env: TILER_IMPORT_FROM: osm TILER_IMPORT_PBF_URL: http://download.geofabrik.de/europe/monaco-latest.osm.pbf TILER_IMPORT_LIMIT: https://gist.githubusercontent.com/Rub21/96bdcac5eb11f0b36ba8d0352ac537f4/raw/2606f2e207d4a0d895897a83efa1efacefd36eb4/monaco.geojson + UPLOAD_EXPIRED_FILES: true + IMPORT_NATURAL_EARTH: true + IMPORT_OSM_LAND: true persistenceDisk: enabled: false accessMode: ReadWriteOnce @@ -372,11 +401,11 @@ tilerImposm: resources: enabled: false requests: - memory: '20Gi' - cpu: '8' + memory: "20Gi" + cpu: "8" limits: - memory: '24Gi' - cpu: '10' + memory: "24Gi" + cpu: "10" nodeSelector: enabled: false label_key: nodegroup_type @@ 
-387,10 +416,11 @@ tilerImposm: tilerServer: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" replicaCount: 1 serviceAnnotations: {} + ingressDomain: tiler2.dev.openhistoricalmap.org env: TILER_SERVER_PORT: 9090 TILER_CACHE_TYPE: s3 #In case TILER_CACHE_TYPE=s3, the kubernetes teemplate will come up with a deployment pod @@ -401,7 +431,7 @@ tilerServer: TILER_CACHE_REGION: us-east-1 TILER_CACHE_AWS_ACCESS_KEY_ID: xyz TILER_CACHE_AWS_SECRET_ACCESS_KEY: xyz/q - command: './start.sh' # ./start.sh & ./tile_cache_downloader.sh & ./expire-watcher.sh + command: "./start.sh" # ./start.sh & ./tile_cache_downloader.sh & ./expire-watcher.sh # In case you use TILER_CACHE_TYPE: file with persistenceDisk persistenceDisk: enabled: false @@ -419,30 +449,32 @@ tilerServer: resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false label_key: nodegroup_type label_value: tiler + autoscaling: + enabled: false # ==================================================================================================== -# Variables for tiler-server cache cleaner, only avaliable in case the TILER_CACHE_TYPE = s3 +# Variables for tiler-server cache cleaner, only avaliable in case the TILER_CACHE_TYPE = s3 # ==================================================================================================== tilerServerCacheCleaner: enabled: false replicaCount: 1 - command: './tile_cache_downloader.sh & ./expire-watcher.sh' + command: "./tile_cache_downloader.sh & ./expire-watcher.sh" resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" env: KILL_PROCESS: manually MAX_NUM_PS: 3 @@ -462,8 +494,8 @@ tilerServerCacheCleaner: tilerVisor: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" serviceAnnotations: {} replicaCount: 1 env: @@ -472,75 +504,99 @@ 
tilerVisor: resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false label_key: nodegroup_type label_value: tiler # ==================================================================================================== +# Variables for Tasking Manager DB +# ==================================================================================================== +tmDb: + enabled: false + image: + name: "postgres" + tag: "11" + nodeSelector: + enabled: false + label_key: nodegroup_type + label_value: web + env: + POSTGRES_DB: tm + POSTGRES_PASSWORD: tm + POSTGRES_USER: postgres + resources: + enabled: false + requests: + memory: "1Gi" + cpu: "2" + limits: + memory: "2Gi" + cpu: "2" +# ==================================================================================================== # Variables for Tasking Manager API # ==================================================================================================== tmApi: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" nodeSelector: enabled: false replicaCount: 1 + ingressDomain: tm-api.dev.osmseed.org serviceAnnotations: {} env: - POSTGRES_HOST: 'tm' - POSTGRES_DB: 'tm' - POSTGRES_PASSWORD: 'tm' - POSTGRES_USER: 'postgres' + POSTGRES_HOST: "tm" + POSTGRES_DB: "tm" + POSTGRES_PASSWORD: "tm" + POSTGRES_USER: "postgres" POSTGRES_PORT: 5432 - TM_ORG_NAME: 'OpenStreetMap' - TM_ORG_CODE: 'OSM' - TM_ORG_URL: 'openstreetmap.org' - TM_ORG_PRIVACY_POLICY_URL: 'openstreetmap.org/copyright' - TM_ORG_GITHUB: 'github.com/openstreetmap' - OSM_SERVER_URL: 'https://openstreetmap.org' - OSM_NOMINATIM_SERVER_URL: 'https://nominatim.openstreetmap.org' - OSM_REGISTER_URL: 'https://openstreetmap.org/user/new' - ID_EDITOR_URL: 'https://openstreetmap.org/edit?editor=id' - POTLATCH2_EDITOR_URL: 'https://openstreetmap.org/edit?editor=potlatch2' - TM_SECRET: 's0m3l0ngr4nd0mstr1ng-b3cr34tiv3' - 
TM_CONSUMER_KEY: 'example' - TM_CONSUMER_SECRET: 'example' - TM_EMAIL_FROM_ADDRESS: 'no-reply@openstreetmap.org' - TM_SMTP_HOST: 'email-smtp.us-east-1.amazonaws.com' + TM_ORG_NAME: "OpenStreetMap" + TM_ORG_CODE: "OSM" + TM_ORG_URL: "openstreetmap.org" + TM_ORG_PRIVACY_POLICY_URL: "openstreetmap.org/copyright" + TM_ORG_GITHUB: "github.com/openstreetmap" + OSM_SERVER_URL: "https://openstreetmap.org" + OSM_NOMINATIM_SERVER_URL: "https://nominatim.openstreetmap.org" + OSM_REGISTER_URL: "https://openstreetmap.org/user/new" + ID_EDITOR_URL: "https://openstreetmap.org/edit?editor=id" + POTLATCH2_EDITOR_URL: "https://openstreetmap.org/edit?editor=potlatch2" + TM_SECRET: "s0m3l0ngr4nd0mstr1ng-b3cr34tiv3" + TM_CONSUMER_KEY: "example" + TM_CONSUMER_SECRET: "example" + TM_EMAIL_FROM_ADDRESS: "no-reply@openstreetmap.org" + TM_SMTP_HOST: "email-smtp.us-east-1.amazonaws.com" TM_SMTP_PORT: 25 - TM_SMTP_USER: '' - TM_SMTP_PASSWORD: '' - TM_DEFAULT_LOCALE: 'en' - TM_APP_API_URL: '' - TM_APP_BASE_URL: '' + TM_SMTP_USER: "" + TM_SMTP_PASSWORD: "" + TM_DEFAULT_LOCALE: "en" + TM_APP_API_URL: "" + TM_APP_BASE_URL: "" resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' - - + memory: "2Gi" + cpu: "2" # ==================================================================================================== # Variables for nominatim api # ==================================================================================================== nominatimApi: - enabled: true + enabled: false image: - name: '' - tag: '' + name: "" + tag: "" serviceAnnotations: {} replicaCount: 1 + ingressDomain: nominatim-api.dev.osmseed.org env: PBF_URL: http://download.geofabrik.de/europe/monaco-latest.osm.pbf REPLICATION_URL: http://download.geofabrik.de/europe/monaco-updates @@ -560,11 +616,11 @@ nominatimApi: resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: 
"2Gi" + cpu: "2" persistenceDisk: enabled: true accessMode: ReadWriteOnce @@ -581,24 +637,23 @@ nominatimApi: GCP_gcePersistentDisk_size: 50Gi nodeSelector: enabled: false - # ==================================================================================================== # Variables for overpass-api # ==================================================================================================== overpassApi: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" serviceAnnotations: {} + ingressDomain: overpass-api.dev.osmseed.org env: - OVERPASS_META: 'yes' + OVERPASS_META: "yes" OVERPASS_MODE: init OVERPASS_PLANET_URL: http://download.geofabrik.de/europe/monaco-latest.osm.bz2 OVERPASS_DIFF_URL: http://download.openstreetmap.fr/replication/europe/monaco/minute/ OVERPASS_RULES_LOAD: 10 - #OVERPASS_PLANET_PREPROCESS: 'mv /db/planet.osm.bz2 /db/planet.osm.pbf && osmium cat -o /db/planet.osm.bz2 /db/planet.osm.pbf && rm /db/planet.osm.pbf' # it is in case we pass planet files as PBF file uncommment this line - OVERPASS_PLANET_PREPROCESS: 'ls' + OVERPASS_PLANET_PREPROCESS: 'mv /db/planet.osm.bz2 /db/planet.osm.pbf && osmium cat -o /db/planet.osm.bz2 /db/planet.osm.pbf && rm /db/planet.osm.pbf' OVERPASS_REPLICATION_SEQUENCE_NUMBER: 5201000 persistenceDisk: enabled: false @@ -616,11 +671,11 @@ overpassApi: resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false # ==================================================================================================== @@ -629,9 +684,10 @@ overpassApi: taginfo: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" serviceAnnotations: {} + ingressDomain: taginfo.dev.osmseed.org env: URL_PLANET_FILE_STATE: https://planet.osm.org/pbf/state.txt URL_HISTORY_PLANET_FILE_STATE: https://planet.osm.org/pbf/full-history/state.txt @@ -643,16 +699,19 @@ taginfo: INSTANCE_ICON: 
/img/logo/world.png INSTANCE_CONTACT: Anonymous TAGINFO_PROJECT_REPO: https://github.com/taginfo/taginfo-projects.git - DOWNLOAD_DB: 'languages wiki' - CREATE_DB: 'db projects chronology' + DOWNLOAD_DB: "languages wiki" + CREATE_DB: "db projects chronology" + ENVIRONMENT: development + AWS_S3_BUCKET: taginfo + INTERVAL_DOWNLOAD_DATA: 3600 resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false # ==================================================================================================== @@ -661,64 +720,153 @@ taginfo: osmSimpleMetrics: enabled: false image: - name: '' - tag: '' - schedule: '* */1 * * *' + name: "" + tag: "" + schedule: "* */1 * * *" resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false - # ==================================================================================================== # Variables for osm-simple-metrics # ==================================================================================================== monitoringReplication: enabled: false image: - name: 'developmentseed/osmseed-replication-job' - tag: '0.1.0-n770.hd15c9f2' - schedule: '* */1 * * *' + name: "developmentseed/osmseed-replication-job" + tag: "0.1.0-n770.hd15c9f2" + schedule: "* */1 * * *" env: CREATE_MISSING_FILES: empty REPLICATION_SEQUENCE_NUMBER: "109789" resources: enabled: false requests: - memory: '1Gi' - cpu: '2' + memory: "1Gi" + cpu: "2" limits: - memory: '2Gi' - cpu: '2' + memory: "2Gi" + cpu: "2" nodeSelector: enabled: false - # ==================================================================================================== # Variables for changeset-replication-job, Configuration to create the replication files by, minute, hour, or day # 
==================================================================================================== changesetReplicationJob: enabled: false image: - name: '' - tag: '' + name: "" + tag: "" resources: enabled: false requests: - memory: '20Gi' - cpu: '8' + memory: "20Gi" + cpu: "8" limits: - memory: '24Gi' - cpu: '10' + memory: "24Gi" + cpu: "10" nodeSelector: enabled: false # ==================================================================================================== -# Variables for cgmap config +# Variables for osmcha web builder +# ==================================================================================================== +osmchaWeb: + enabled: false + image: + name: "" + tag: "" +# ==================================================================================================== +# Variables for osmcha Api +# ==================================================================================================== +osmchaApi: + enabled: false + image: + name: "ghcr.io/osmcha/osmcha-django" + tag: "61a80e897b45fb9b1a177bf433fb79bfa7fda59d" + ingressDomain: osmcha.dev.osmseed.org + env: + DJANGO_SETTINGS_MODULE: "config.settings.production" + OSMCHA_FRONTEND_VERSION: "v0.86.0-production" + DJANGO_SECRET_KEY: abc + ## ohm + OAUTH_OSM_KEY: abc + OAUTH_OSM_SECRET: bdc + DJANGO_SECURE_SSL_REDIRECT: "False" + OSM_SERVER_URL: https://www.osmseed.org + OAUTH_REDIRECT_URI: https://osmcha-staging.osmseed.org/oauth-landing.html + OSM_PLANET_BASE_URL: https://s3.amazonaws.com/planet.osmseed.org/replication/changesets/ + ## frontend + OSMCHA_URL: https://osmcha-staging.osmseed.org + OSMCHA_API_URL: www.osmseed.org + REACT_APP_OSM_URL: https://www.osmseed.org + REACT_APP_OSM_API: https://www.osmseed.org/api/0.6 + REACT_APP_OVERPASS_BASE: //overpass-api.osmseed.org/api/interpreter + REACT_APP_DISABLE_REAL_CHANGESETS: 1 + REACT_APP_MAPBOX_ACCESS_TOKEN: "pk.abc.df" + resources: + enabled: false + requests: + memory: "512Mi" + cpu: "1" + limits: + memory: "512Mi" + 
cpu: "1" + nodeSelector: + enabled: false + label_key: nodegroup_type + label_value: db +# ==================================================================================================== +# Variables for osmcha DB +# ==================================================================================================== +osmchaDb: + enabled: false + image: + name: "" + tag: "" + env: + POSTGRES_DB: osmcha-db + POSTGRES_USER: postgres + POSTGRES_PASSWORD: "1234" + resources: + enabled: false + requests: + memory: "20Gi" + cpu: "8" + limits: + memory: "24Gi" + cpu: "10" + persistenceDisk: + enabled: false + accessMode: ReadWriteOnce + mountPath: /var/lib/postgresql/data + # Minikube + localVolumeHostPath: /mnt/db-data/osmcha-data + localVolumeSize: 10Gi + # AWS + AWS_ElasticBlockStore_volumeID: vol-1234bcd + AWS_ElasticBlockStore_size: 50Gi + # GCP + GCP_gcePersistentDisk_pdName: osmseed-osmcha-disk--v1 + GCP_gcePersistentDisk_size: 50Gi + nodeSelector: + enabled: false +# ==================================================================================================== +# Planet server files +# ==================================================================================================== +planetFiles: + enabled: false + image: + name: "" + tag: "" +# ==================================================================================================== +# Cgimap # ==================================================================================================== cgimap: enabled: false @@ -734,4 +882,4 @@ cgimap: memory: '24Gi' cpu: '10' nodeSelector: - enabled: false + enabled: false \ No newline at end of file