diff --git a/chartpress.yaml b/chartpress.yaml
index f5af3ddc..388d0d44 100644
--- a/chartpress.yaml
+++ b/chartpress.yaml
@@ -46,4 +46,6 @@ charts:
valuesPath: osmchaWeb.image
osmcha-db:
valuesPath: osmchaDb.image
-
\ No newline at end of file
+ planet-files:
+ valuesPath: planetFiles.image
+
diff --git a/compose/db-backup-restore.yml b/compose/db-backup-restore.yml
index 7641cde8..982e94ad 100644
--- a/compose/db-backup-restore.yml
+++ b/compose/db-backup-restore.yml
@@ -10,11 +10,7 @@ services:
dockerfile: Dockerfile
volumes:
- ../data/db-backup-restore-data:/mnt/data
- command: >
- /bin/bash -c "
- echo 'Set cron job for backing up DB, every 4 minutes';
- while :; do echo 'Creating DB backup...'; /start.sh; sleep 4m; done;
- "
+ command: /start.sh
env_file:
- ../envs/.env.db
- ../envs/.env.osmcha
diff --git a/compose/full-history.yml b/compose/full-history.yml
deleted file mode 100644
index f5bf586a..00000000
--- a/compose/full-history.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-version: '3'
-services:
- #####################################################
- ## OSM full planet replication
- #####################################################
- full-history:
- image: osmseed-full-history:v1
- build:
- context: ../images/full-history
- dockerfile: Dockerfile
- volumes:
- - ../data/full-history-data:/mnt/data
- command: >
- /bin/bash -c "
- echo 'Set cron job for full history PBF file, every 4 minutes';
- while :; do echo 'Creating full history PBF file...'; /start.sh; sleep 4m; done;
- "
- env_file:
- - ../envs/.env.db
- - ../envs/.env.db-utils
- - ../envs/.env.cloudprovider
-
\ No newline at end of file
diff --git a/compose/planet-dump.yml b/compose/planet-dump.yml
deleted file mode 100644
index b265a768..00000000
--- a/compose/planet-dump.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-version: '3'
-services:
- ######################################################
- ### OSM processor and also use this image as base for other containers
- ######################################################
- osm-processor:
- image: osmseed-osm-processor:v1
- build:
- context: ../images/osm-processor
- dockerfile: Dockerfile
- ######################################################
- ### Planet replication section
- ######################################################
- planet-dump:
- image: osmseed-planet-dump:v1
- build:
- context: ./images/planet-dump
- dockerfile: Dockerfile
- volumes:
- - ../data/planet-dump-data:/mnt/data
- command: >
- /bin/bash -c "
- echo 'Set cronjob for planet-dump, every 4 minutes';
- while :; do echo 'Creating the planet dump file...'; /start.sh; sleep 4m; done;
- "
- env_file:
- - ../envs/.env.db
- - ../envs/.env.db-utils
- - ../envs/.env.cloudprovider
-
diff --git a/compose/planet.yml b/compose/planet.yml
new file mode 100644
index 00000000..bb0e31c9
--- /dev/null
+++ b/compose/planet.yml
@@ -0,0 +1,89 @@
+version: '3'
+services:
+ ######################################################
+ ### OSM minute replication files section
+ ######################################################
+ replication-job:
+ image: osmseed-replication-job:v1
+ build:
+ context: ../images/replication-job
+ dockerfile: Dockerfile
+ volumes:
+ - ../data/replication-job-data:/mnt/data
+ # - ./../images/replication-job:/app
+ command: /start.sh
+ env_file:
+ - ../envs/.env.db
+ - ../envs/.env.db-utils
+ - ../envs/.env.cloudprovider
+
+ ######################################################
+ ### Planet replication section
+ ######################################################
+ planet-dump:
+ image: osmseed-planet-dump:v1
+ build:
+ context: ../images/planet-dump
+ dockerfile: Dockerfile
+ volumes:
+ - ../data/planet-dump-data:/mnt/data
+    command: /start.sh
+ env_file:
+ - ../envs/.env.db
+ - ../envs/.env.db-utils
+ - ../envs/.env.cloudprovider
+
+ #####################################################
+ ## OSM full planet replication
+ #####################################################
+ full-history:
+ image: osmseed-full-history:v1
+ build:
+ context: ../images/full-history
+ dockerfile: Dockerfile
+ volumes:
+ - ../data/full-history-data:/mnt/data
+ command: /start.sh
+ env_file:
+ - ../envs/.env.db
+ - ../envs/.env.db-utils
+ - ../envs/.env.cloudprovider
+
+ #####################################################
+ ## Changeset replications
+ #####################################################
+ changeset-replication-job:
+ image: osmseed-changeset-replication-job:v1
+ build:
+ context: ../images/changeset-replication-job
+ dockerfile: Dockerfile
+ volumes:
+ - ../data/changeset-replication-job-data:/mnt/changesets
+ command: >
+ /bin/bash -c "./start.sh"
+ env_file:
+ - ../envs/.env.db
+ - ../envs/.env.db-utils
+ - ../envs/.env.cloudprovider
+
+ ######################################################
+ ### NGINX container for serving files
+ ######################################################
+ planet-files:
+ image: osmseed-planet-files:v1
+ build:
+ context: ../images/planet-files
+ dockerfile: Dockerfile
+ ports:
+ - "8081:80"
+ - "3000:3000"
+ volumes:
+ # Serve minute replication files
+ - ../data/replication-job-data:/usr/share/nginx/html/server/static-files/replication/minute
+ # Serve planet dump files
+ - ../data/planet-dump-data:/usr/share/nginx/html/server/static-files/planet
+ # Serve full history planet file
+      - '../data/full-history-data:/usr/share/nginx/html/server/static-files/full-planet'
+ - '../data/changeset-replication-job-data:/usr/share/nginx/html/server/static-files/changesets'
+ # Development mode
+ # - ./../images/planet-files/:/usr/share/nginx/html/
diff --git a/compose/replication.yml b/compose/replication.yml
deleted file mode 100644
index 6cdba218..00000000
--- a/compose/replication.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: '3'
-services:
- ######################################################
- ### OSM minute replication files section
- ######################################################
- replication-job:
- image: osmseed-replication-job:v1
- build:
- context: ../images/replication-job
- dockerfile: Dockerfile
- volumes:
- - ../data/replication-job-data:/mnt/data
- command: >
- /bin/bash -c " echo Sleep the replication-job for 1 minute; sleep 1m; echo Creating the replication files!; /start.sh "
- env_file:
- - ../envs/.env.db
- - ../envs/.env.db-utils
- - ../envs/.env.cloudprovider
- changeset-replication-job:
- image: osmseed-changeset-replication-job:v1
- build:
- context: ../images/changeset-replication-job
- dockerfile: Dockerfile
- volumes:
- - ../data/changeset-replication-job-data:/mnt/changesets
- # - ./../images/changeset-replication-job:/openstreetmap-changeset-replication
- command: >
- /bin/bash -c "./start.sh"
- env_file:
- - ../envs/.env.db
- - ../envs/.env.db-utils
- - ../envs/.env.cloudprovider
diff --git a/compose/web.yml b/compose/web.yml
index 1b90b4b1..ef571d6c 100644
--- a/compose/web.yml
+++ b/compose/web.yml
@@ -10,20 +10,20 @@ services:
context: ../images/db
dockerfile: Dockerfile
ports:
- - '5432:5432'
- volumes:
- - ../data/db-data:/var/lib/postgresql/data
+ - '5432:5432'
+ # volumes:
+ # - ../data/db-data:/var/lib/postgresql/data
env_file:
- - ../envs/.env.db
+ - ../envs/.env.db
web:
image: osmseed-web:v1
build:
context: ../images/web
dockerfile: Dockerfile
ports:
- - '80:80'
+ - '80:80'
env_file:
- - ../envs/.env.web
- - ../envs/.env.db
+ - ../envs/.env.web
+ - ../envs/.env.db
depends_on:
- - db
+ - db
diff --git a/images/full-history/README.md b/images/full-history/README.md
index 58900aa2..0d841e22 100644
--- a/images/full-history/README.md
+++ b/images/full-history/README.md
@@ -12,18 +12,8 @@ In order to run this container we need environment variables, these can be found
**Note**: Rename the above files as `.env.db`, `.env.db-utils` and `.env.cloudprovider`
-#### Running full-history container
-
+### Build and bring up the container
```sh
- # Docker compose
- docker-compose run full-history
-
- # Docker
- docker run \
- --env-file ./envs/.env.db \
- --env-file ./envs/.env.full-history \
- --env-file ./envs/.env.cloudprovider \
- -v ${PWD}/data/full-history-data:/mnt/data \
- --network osm-seed_default \
- -it osmseed-full-history:v1
+docker compose -f ./compose/planet.yml build
+docker compose -f ./compose/planet.yml up full-history
```
diff --git a/images/planet-dump/README.md b/images/planet-dump/README.md
index bc6f7031..bbb7ea17 100644
--- a/images/planet-dump/README.md
+++ b/images/planet-dump/README.md
@@ -12,18 +12,8 @@ In order to run this container we need environment variables, these can be found
**Note**: Rename the above files as `.env.db`, `.env.db-utils` and `.env.cloudprovider`
-#### Running planet-dump container
-
+### Build and bring up the container
```sh
- # Docker compose
- docker-compose run planet-dump
-
- # Docker
- docker run \
- --env-file ./envs/.env.db \
- --env-file ./envs/.env.planet-dump \
- --env-file ./envs/.env.cloudprovider \
- -v ${PWD}/data/planet-dump-data:/mnt/data \
- --network osm-seed_default \
- -it osmseed-planet-dump:v1
+docker compose -f ./compose/planet.yml build
+docker compose -f ./compose/planet.yml up planet-dump
```
diff --git a/images/planet-files/Dockerfile b/images/planet-files/Dockerfile
new file mode 100644
index 00000000..4f9aa907
--- /dev/null
+++ b/images/planet-files/Dockerfile
@@ -0,0 +1,10 @@
+FROM nginx:latest
+RUN apt-get update && \
+ apt-get install -y nodejs npm && \
+ rm -rf /var/lib/apt/lists/*
+WORKDIR /usr/share/nginx/html/server/
+COPY server/package.json ./
+RUN npm install
+COPY server/server.js ./
+COPY index.html /usr/share/nginx/html/
+CMD ["sh", "-c", "nginx -g 'daemon off;' & node server.js"]
diff --git a/images/planet-files/README.MD b/images/planet-files/README.MD
new file mode 100644
index 00000000..79407e01
--- /dev/null
+++ b/images/planet-files/README.MD
@@ -0,0 +1,11 @@
+
+# Planet server files
+
+This container is based on Nginx and serves data related to planet, replication, and changesets for easy access and download, similar to how it is done in OpenStreetMap.
+
+
+# Build and bring up the container
+```sh
+docker compose -f ./compose/planet.yml build
+docker compose -f ./compose/planet.yml up planet-files
+```
\ No newline at end of file
diff --git a/images/planet-files/index.html b/images/planet-files/index.html
new file mode 100644
index 00000000..b1b9a205
--- /dev/null
+++ b/images/planet-files/index.html
@@ -0,0 +1,105 @@
+
+
+
+
+
+ Planet Replication
+
+
+
+
+
+
+
diff --git a/images/planet-files/server/package.json b/images/planet-files/server/package.json
new file mode 100644
index 00000000..f91d0fca
--- /dev/null
+++ b/images/planet-files/server/package.json
@@ -0,0 +1,12 @@
+{
+ "name": "websocket-server",
+ "version": "1.0.0",
+ "main": "server.js",
+ "scripts": {
+ "start": "node server.js"
+ },
+ "dependencies": {
+ "cors": "^2.8.5",
+ "express": "^4.17.1"
+ }
+}
diff --git a/images/planet-files/server/server.js b/images/planet-files/server/server.js
new file mode 100644
index 00000000..227a23b8
--- /dev/null
+++ b/images/planet-files/server/server.js
@@ -0,0 +1,49 @@
+const express = require("express");
+const cors = require("cors");
+const fs = require("fs");
+const path = require("path");
+const app = express();
+const port = 3000;
+
+app.use(cors());
+app.get("/:directory(*)", (req, res) => {
+ const directory = req.params.directory;
+ const files = getFilesRecursive(path.join("./static-files", directory));
+ res.json(files);
+});
+
+function formatSize(sizeInBytes) {
+ const KB = 1024;
+ const MB = KB * 1024;
+ if (sizeInBytes < KB) {
+ return (sizeInBytes / KB).toFixed(1) + " kB";
+ } else if (sizeInBytes < MB) {
+ const sizeInKB = (sizeInBytes / KB).toFixed(2);
+ return sizeInKB + " kB";
+ } else {
+ const sizeInMB = (sizeInBytes / MB).toFixed(2);
+    return sizeInMB + " MB";
+ }
+ }
+
+function getFilesRecursive(dir) {
+ const files = fs.readdirSync(dir);
+ return files.map((file) => {
+ const filePath = path.join(dir, file);
+ const stats = fs.statSync(filePath);
+ const isDirectory = stats.isDirectory();
+ const size = isDirectory ? null : formatSize(stats.size);
+ const creationDate = isDirectory ? null : stats.mtime.toISOString();
+ return {
+ name: file,
+ isDirectory,
+ path: filePath,
+ size,
+ creationDate,
+ };
+ });
+}
+
+app.listen(port, () => {
+ console.log(`Server is listening at http://localhost:${port}`);
+});
diff --git a/images/replication-job/Dockerfile b/images/replication-job/Dockerfile
index 2b90ac07..8683fb42 100644
--- a/images/replication-job/Dockerfile
+++ b/images/replication-job/Dockerfile
@@ -1,7 +1,12 @@
FROM developmentseed/osmseed-osm-processor:v2
+# Install Nginx
+RUN apt-get update && \
+ apt-get install -y nginx && \
+ rm -rf /var/lib/apt/lists/*
+
RUN pip3 install boto3
-COPY ./start.sh /
+COPY ./*.sh /
COPY monitoring.py /
WORKDIR /mnt/data
CMD /start.sh
diff --git a/images/replication-job/start.sh b/images/replication-job/start.sh
index 0435ca85..5653eb7a 100755
--- a/images/replication-job/start.sh
+++ b/images/replication-job/start.sh
@@ -11,44 +11,101 @@ fi
workingDirectory="/mnt/data"
mkdir -p $workingDirectory
-# Check if state.txt exist in the workingDirectory,
-# in case the file does not exist locally and does not exist in the cloud the replication will start from 0
-if [ ! -f $workingDirectory/state.txt ]; then
- echo "File $workingDirectory/state.txt does not exist in local storage"
- ### AWS
- if [ $CLOUDPROVIDER == "aws" ]; then
- aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt
- if [[ $? -eq 0 ]]; then
- echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET"
- aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+# Remove files that are not required
+[ -e /mnt/data/replicate.lock ] && rm -f /mnt/data/replicate.lock
+# [ -e /mnt/data/processed_files.log ] && rm -f /mnt/data/processed_files.log
+
+function get_current_state_file() {
+ # Check if state.txt exist in the workingDirectory,
+ # in case the file does not exist locally and does not exist in the cloud the replication will start from 0
+ if [ ! -f $workingDirectory/state.txt ]; then
+ echo "File $workingDirectory/state.txt does not exist in local storage"
+ ### AWS
+ if [ $CLOUDPROVIDER == "aws" ]; then
+ aws s3 ls $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt
+ if [[ $? -eq 0 ]]; then
+ echo "File exist, let's get it from $CLOUDPROVIDER - $AWS_S3_BUCKET"
+ aws s3 cp $AWS_S3_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+ fi
fi
- fi
-
- ### GCP
- if [ $CLOUDPROVIDER == "gcp" ]; then
- gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt
- if [[ $? -eq 0 ]]; then
- echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET"
- gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+
+ ### GCP
+ if [ $CLOUDPROVIDER == "gcp" ]; then
+ gsutil ls $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt
+ if [[ $? -eq 0 ]]; then
+ echo "File exist, let's get it from $CLOUDPROVIDER - $GCP_STORAGE_BUCKET"
+ gsutil cp $GCP_STORAGE_BUCKET/$REPLICATION_FOLDER/state.txt $workingDirectory/state.txt
+ fi
fi
- fi
-
- ### Azure
- if [ $CLOUDPROVIDER == "azure" ]; then
- state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists")
- if [[ $state_file_exists=="true" ]]; then
- echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME"
- az storage blob download \
- --container-name $AZURE_CONTAINER_NAME \
- --name $REPLICATION_FOLDER/state.txt \
- --file $workingDirectory/state.txt --query="name"
+
+ ### Azure
+ if [ $CLOUDPROVIDER == "azure" ]; then
+ state_file_exists=$(az storage blob exists --container-name $AZURE_CONTAINER_NAME --name $REPLICATION_FOLDER/state.txt --query="exists")
+    if [[ "$state_file_exists" == "true" ]]; then
+ echo "File exist, let's get it from $CLOUDPROVIDER - $AZURE_CONTAINER_NAME"
+ az storage blob download \
+ --container-name $AZURE_CONTAINER_NAME \
+ --name $REPLICATION_FOLDER/state.txt \
+ --file $workingDirectory/state.txt --query="name"
+ fi
fi
fi
- mkdir -p $workingDirectory
-fi
+}
+
+function upload_file_cloud() {
+ # Upload files to cloud provider
+ local local_file="$1"
+ local cloud_file="$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"}"
+ echo "$(date +%F_%H:%M:%S): Upload file $local_file to ...$CLOUDPROVIDER...$cloud_file"
+ if [ "$CLOUDPROVIDER" == "aws" ]; then
+ aws s3 cp "$local_file" "$AWS_S3_BUCKET/$cloud_file" --acl public-read
+ elif [ "$CLOUDPROVIDER" == "gcp" ]; then
+ gsutil cp -a public-read "$local_file" "$GCP_STORAGE_BUCKET/$cloud_file"
+ elif [ "$CLOUDPROVIDER" == "azure" ]; then
+ az storage blob upload \
+ --container-name "$AZURE_CONTAINER_NAME" \
+ --file "$local_file" \
+ --name "$cloud_file" \
+ --output none
+ fi
+}
+
+function monitor_minute_replication() {
+  # Function to handle continuous monitoring, minute replication and upload to cloud provider
+ # Directory to store a log of processed files
+ processed_files_log="$workingDirectory/processed_files.log"
+ max_log_size_mb=1
+
+ while true; do
+ upload_file_cloud /mnt/data/state.txt
+ sleep 60s
+ done &
-# Creating the replication files
-function generateReplication() {
+ while true; do
+ if [ -e "$processed_files_log" ]; then
+ log_size=$(du -m "$processed_files_log" | cut -f1)
+ if [ "$log_size" -gt "$max_log_size_mb" ]; then
+ echo $(date +%F_%H:%M:%S)": Cleaning processed_files_log..." >"$processed_files_log"
+ fi
+ for local_minute_file in $(find $workingDirectory/ -cmin -1); do
+ if [ -f "$local_minute_file" ]; then
+ if grep -q "$local_minute_file" "$processed_files_log"; then
+ continue
+ fi
+ upload_file_cloud $local_minute_file
+ echo "$local_minute_file" >>"$processed_files_log"
+ fi
+ done
+ else
+ echo "File $processed_files_log not found."
+ echo $processed_files_log >$processed_files_log
+ fi
+ sleep 10s
+ done
+}
+
+function generate_replication() {
+ # Replicate the API database using Osmosis
osmosis -q \
--replicate-apidb \
iterations=0 \
@@ -60,45 +117,32 @@ function generateReplication() {
password=$POSTGRES_PASSWORD \
validateSchemaVersion=no \
--write-replication \
- workingDirectory=$workingDirectory &
- while true; do
- for local_file in $(find $workingDirectory/ -cmin -1); do
- if [ -f "$local_file" ]; then
-
- cloud_file=$REPLICATION_FOLDER/${local_file#*"$workingDirectory/"}
- echo $(date +%F_%H:%M:%S)": Copy file...$local_file to $cloud_file"
-
- ### AWS
- if [ $CLOUDPROVIDER == "aws" ]; then
- aws s3 cp $local_file $AWS_S3_BUCKET/$cloud_file --acl public-read
- fi
-
- ### GCP
- if [ $CLOUDPROVIDER == "gcp" ]; then
- #TODO, emable public acces
- gsutil cp -a public-read $local_file $GCP_STORAGE_BUCKET/$cloud_file
- fi
-
- ### Azure
- if [ $CLOUDPROVIDER == "azure" ]; then
- #TODO, emable public acces
- az storage blob upload \
- --container-name $AZURE_CONTAINER_NAME \
- --file $local_file \
- --name $cloud_file \
- --output none
- fi
- fi
- done
- sleep 15s
- done
+ workingDirectory=$workingDirectory
}
-# Check if Postgres is ready
+# function start_nginx() {
+# if [ "$STAR_NGINX_SERVER" = "true" ]; then
+# echo 'server {
+# listen 8080;
+# server_name localhost;
+
+# location / {
+# root /mnt/data;
+# index index.html;
+# }
+# }' >/etc/nginx/nginx.conf
+# service nginx restart
+# else
+# echo "STAR_NGINX_SERVER is either not set or not set to true."
+# fi
+# }
+
+######################## Start minutes replication process ########################
+get_current_state_file
flag=true
while "$flag" = true; do
pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue
- # Change flag to false to stop ping the DB
flag=false
- generateReplication
+ generate_replication &
+ monitor_minute_replication
done
diff --git a/osm-seed/values.yaml b/osm-seed/values.yaml
index b9bfc19d..122c95e3 100644
--- a/osm-seed/values.yaml
+++ b/osm-seed/values.yaml
@@ -755,7 +755,7 @@ changesetReplicationJob:
# Variables for osmcha web builder
# ====================================================================================================
osmchaWeb:
- enabled: true
+ enabled: false
image:
name: ""
tag: ""
@@ -763,7 +763,7 @@ osmchaWeb:
# Variables for osmcha Api
# ====================================================================================================
osmchaApi:
- enabled: true
+ enabled: false
image:
name: "ghcr.io/willemarcel/osmcha-django"
tag: "10d7f6748d9e55d439339ed2c35509db15f782a6"
@@ -804,7 +804,7 @@ osmchaApi:
# Variables for osmcha DB
# ====================================================================================================
osmchaDb:
- enabled: true
+ enabled: false
image:
name: ""
tag: ""
@@ -835,3 +835,12 @@ osmchaDb:
GCP_gcePersistentDisk_size: 50Gi
nodeSelector:
enabled: false
+
+# ====================================================================================================
+# Planet server files
+# ====================================================================================================
+planetFiles:
+ enabled: false
+ image:
+ name: ""
+ tag: ""