diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/404.html b/404.html new file mode 100644 index 000000000..635c94b0f --- /dev/null +++ b/404.html @@ -0,0 +1,2092 @@ + + + + + + + + + + + + + + + + + + + + + IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Accessing-your-Device-from-the-internet/index.html b/Accessing-your-Device-from-the-internet/index.html new file mode 100644 index 000000000..8f1a138c1 --- /dev/null +++ b/Accessing-your-Device-from-the-internet/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Backup-and-Restore/index.html b/Backup-and-Restore/index.html new file mode 100644 index 000000000..7ec593a62 --- /dev/null +++ b/Backup-and-Restore/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Backups.md.old b/Backups.md.old new file mode 100644 index 000000000..c220ed2ef --- /dev/null +++ b/Backups.md.old @@ -0,0 +1,113 @@ +# Backups +Because containers can easily be rebuilt from docker hub we only have to back up the data in the "volumes" directory. + +## Cloud Backups +### Dropbox-Uploader +This a great utility to easily upload data from your Pi to the cloud. https://magpi.raspberrypi.org/articles/dropbox-raspberry-pi. It can be installed from the Menu under Backups. +### rclone (Google Drive) +This is a service to upload to Google Drive. The config is described [here]( https://medium.com/@artur.klauser/mounting-google-drive-on-raspberry-pi-f5002c7095c2). Install it from the menu then follow the link for these sections: +* Getting a Google Drive Client ID +* Setting up the Rclone Configuration + +When naming the service in `rclone config` ensure to call it "gdrive" + +**The Auto-mounting instructions for the drive in the link don't work on Rasbian**. Auto-mounting of the drive isn't necessary for the backup script. + +If you want your Google Drive to mount on every boot then follow the instructions at the bottom of the wiki page + + +## Influxdb +`~/IOTstack/scripts/backup_influxdb.sh` does a database snapshot and stores it in ~/IOTstack/backups/influxdb/db . This can be restored with the help a script (that I still need to write) + +## Docker backups +The script `~/IOTstack/scripts/docker_backup.sh` performs the master backup for the stack. + +This script can be placed in a cron job to backup on a schedule. +Edit the crontab with`crontab -e` +Then add `0 23 * * * ~/IOTstack/scripts/docker_backup.sh >/dev/null 2>&1` to have a backup every night at 23:00. + +This script cheats by copying the volume folder live. The correct way would be to stop the stack first then copy the volumes and restart. The cheating method shouldn't be a problem unless you have fast changing data like in influxdb. This is why the script makes a database export of influxdb and ignores its volume. + +### Cloud integration +The docker_backup.sh script now no longer requires modification to enable cloud backups. It now tests for the presence of and enable file in the backups folder +#### Drobox-Uploader +The backup tests for a file called `~/IOTstack/backups/dropbox`, if it is present it will upload to dropbox. To disable dropbox upload delete the file. To enable run `sudo touch ~/IOTstack/backups/dropbox` +#### rclone +The backup tests for a file called `~/IOTstack/backups/rclone`, if it is present it will upload to google drive. To disable rclone upload delete the file. To enable run `sudo touch ~/IOTstack/backups/rclone` + +#### Pruning online backups +@877dev has added functionality to prune both local and cloud backups. For dropbox make sure you dont have any files that contain spaces in your backup directory as the script cannot handle it at this time. 
+ +### Restoring a backup +The "volumes" directory contains all the persistent data necessary to recreate the container. The docker-compose.yml and the environment files are optional as they can be regenerated with the menu. Simply copy the volumes directory into the IOTstack directory, Rebuild the stack and start. + +## Added your Dropbox token incorrectly or aborted the install at the token screen + +Make sure you are running the latest version of the project [link](https://sensorsiot.github.io/IOTstack/Updating-the-Project/). + +Run `~/Dropbox-Uploader/dropbox_uploader.sh unlink` and if you have added it key then it will prompt you to confirm its removal. If no key was found it will ask you for a new key. + +Confirm by running `~/Dropbox-Uploader/dropbox_uploader.sh` it should ask you for your key if you removed it or show you the following prompt if it has the key: + +``` console + $ ~/Dropbox-Uploader/dropbox_uploader.sh +Dropbox Uploader v1.0 +Andrea Fabrizi - andrea.fabrizi@gmail.com + +Usage: /home/pi/Dropbox-Uploader/dropbox_uploader.sh [PARAMETERS] COMMAND... + +Commands: + upload + download [LOCAL_FILE/DIR] + delete + move + copy + mkdir +.... + +``` + +Ensure you **are not** running as sudo as this will store your api in the /root directory as `/root/.dropbox_uploader` + +If you ran the command with sudo the remove the old token file if it exists with either `sudo rm /root/.dropbox_uploader` or `sudo ~/Dropbox-Uploader/dropbox_uploader.sh unlink` + +## Auto-mount Gdrive with rclone + +To enable rclone to mount on boot you will need to make a user service. Run the following commands + +``` console +$ mkdir -p ~/.config/systemd/user +$ nano ~/.config/systemd/user/gdrive.service +``` +Copy the following code into the editor, save and exit + +``` +[Unit] +Description=rclone: Remote FUSE filesystem for cloud storage +Documentation=man:rclone(1) + +[Service] +Type=notify +ExecStartPre=/bin/mkdir -p %h/mnt/gdrive +ExecStart= \ + /usr/bin/rclone mount \ + --fast-list \ + --vfs-cache-mode writes \ + gdrive: %h/mnt/gdrive + +[Install] +WantedBy=default.target +``` +enable it to start on boot with: (no sudo) +``` console +$ systemctl --user enable gdrive.service +``` +start with +``` console +$ systemctl --user start gdrive.service +``` +if you no longer want it to start on boot then type: +``` console +$ systemctl --user disable gdrive.service +``` + diff --git a/Basic_setup/Accessing-your-Device-from-the-internet/index.html b/Basic_setup/Accessing-your-Device-from-the-internet/index.html new file mode 100644 index 000000000..ee3b05d23 --- /dev/null +++ b/Basic_setup/Accessing-your-Device-from-the-internet/index.html @@ -0,0 +1,2525 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Accessing your device from the internet - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Accessing your device from the internet

+

The challenge most of us face with remotely accessing our home networks is that our routers usually have a dynamically-allocated IP address on the public (WAN) interface.

+

From time to time the IP address that your ISP assigns changes and it's difficult to keep up. Fortunately, there is a solution: Dynamic DNS. The section below shows you how to set up an easy-to-remember domain name that follows your public IP address no matter when it changes.

+

Secondly, how do you get into your home network? Your router has a firewall that is designed to keep the rest of the internet out of your network to protect you. The solution to that is a Virtual Private Network (VPN) or "tunnel".

+

Dynamic DNS

+

There are two parts to a Dynamic DNS service:

+
    +
  1. You have to register with a Dynamic DNS service provider and obtain a domain name that is not already taken by someone else.
  2. +
  3. Something on your side of the network needs to propagate updates so that your chosen domain name remains in sync with your router's dynamically-allocated public IP address.
  4. +
+

Register with a Dynamic DNS service provider

+

The first part is fairly simple and there are quite a few Dynamic DNS service providers including:

+ +
+

You can find more service providers by Googling "Dynamic DNS service".

+
+

Some router vendors also provide their own built-in Dynamic DNS capabilities for registered customers so it's a good idea to check your router's capabilities before you plough ahead.

+

Dynamic DNS propagation

+

The "something" on your side of the network propagating WAN IP address changes can be either:

+
    +
  • your router; or
  • +
  • a "behind the router" technique, typically a periodic job running on the same Raspberry Pi that is hosting IOTstack and WireGuard.
  • +
+

If you have the choice, your router is to be preferred. That's because your router is usually the only device in your network that actually knows when its WAN IP address changes. A Dynamic DNS client running on your router will propagate changes immediately and will only transmit updates when necessary. More importantly, it will persist through network interruptions or Dynamic DNS service provider outages until it receives an acknowledgement that the update has been accepted.

+

Nevertheless, your router may not support the Dynamic DNS service provider you wish to use, or may come with constraints that you find unsatisfactory so any behind-the-router technique is always a viable option, providing you understand its limitations.

+

A behind-the-router technique usually relies on sending updates according to a schedule. An example is a cron job that runs every five minutes. That means any router WAN IP address changes won't be propagated until the next scheduled update. In the event of network interruptions or service provider outages, it may take close to ten minutes before everything is back in sync. Moreover, given that WAN IP address changes are infrequent events, most scheduled updates will be sending information unnecessarily.

+

DuckDNS container

+

The recommended and easiest solution is to install the DuckDNS Docker container +from the menu. It includes the cron service, and logs are handled by Docker.

+

For configuration see Containers/Duck DNS.

+
+

Note

+

This is a recently added container; please don't hesitate to report any +possible faults on Discord or as GitHub issues.

+
+

DuckDNS client script

+
+

Info

+

This method will soon be deprecated in favor of the DuckDNS container.

+
+

IOTstack provides a solution for DuckDNS. The best approach to running it is:

+
$ mkdir -p ~/.local/bin
+$ cp ~/IOTstack/duck/duck.sh ~/.local/bin
+
+
+

The reason for recommending that you make a copy of duck.sh is because the "original" is under Git control. If you change the "original", Git will keep telling you that the file has changed and it may block incoming updates from GitHub.

+
+

Then edit ~/.local/bin/duck.sh to add your DuckDNS domain name(s) and token:

+
DOMAINS="YOURS.duckdns.org"
+DUCKDNS_TOKEN="YOUR_DUCKDNS_TOKEN"
+
+

For example:

+
DOMAINS="downunda.duckdns.org"
+DUCKDNS_TOKEN="8a38f294-b5b6-4249-b244-936e997c6c02"
+
+

Note:

+
    +
  • +

    The DOMAINS= variable can be simplified to just "YOURS", with the .duckdns.org portion implied, as in:

    +
    DOMAINS="downunda"
    +
    +
  • +
+

Once your credentials are in place, test the result by running:

+
$ ~/.local/bin/duck.sh
+ddd, dd mmm yyyy hh:mm:ss ±zzzz - updating DuckDNS
+OK
+
+

The timestamp is produced by the duck.sh script. The expected responses from the DuckDNS service are:

+
    +
  • "OK" - indicating success; or
  • +
  • "KO" - indicating failure.
  • +
+

Check your work if you get "KO" or any other errors.

+

Next, assuming dig is installed on your Raspberry Pi (sudo apt install dnsutils), you can test propagation by sending a directed query to a DuckDNS name server. For example, assuming the domain name you registered was downunda.duckdns.org, you would query like this:

+
$ dig @ns1.duckdns.org downunda.duckdns.org +short
+
+

The expected result is the IP address of your router's WAN interface. It is a good idea to confirm that it is the same as you get from whatismyipaddress.com.

+

A null result indicates failure so check your work.

+

Remember, the Domain Name System is a distributed database. It takes time for changes to propagate. The response you get from directing a query to ns1.duckdns.org may not be the same as the response you get from any other DNS server. You often have to wait until cached records expire and a recursive query reaches the authoritative DuckDNS name-servers.
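If you want to watch caching in action, compare the answer from a DuckDNS name-server with the answer from any other resolver. The commands below are only a sketch: downunda.duckdns.org and 203.0.113.10 are placeholders, and 1.1.1.1 is just one example of a public resolver:

``` console
$ dig @ns1.duckdns.org downunda.duckdns.org +short
203.0.113.10
$ dig @1.1.1.1 downunda.duckdns.org +short
203.0.113.10
```

If the two answers differ shortly after your WAN IP address has changed, it usually just means cached records have not yet expired.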

+

Running the DuckDNS client automatically

+

The recommended arrangement for keeping your Dynamic DNS service up-to-date is to invoke duck.sh from cron at five minute intervals.

+

If you are new to cron, see these guides for more information about setting up and editing your crontab:

+ +

A typical crontab will look like this:

+
SHELL=/bin/bash
+HOME=/home/pi
+PATH=/home/pi/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+*/5 *   *   *   *   duck.sh >/dev/null 2>&1
+
+

The first three lines construct the runtime environment correctly and should be at the start of any crontab.

+

The last line means "run duck.sh every five minutes". See crontab.guru if you want to understand the syntax of the last line.

+

When launched in the background by cron, the script supplied with IOTstack adds a random delay of up to one minute to try to reduce the "hammering effect" of a large number of users updating DuckDNS simultaneously.
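For reference, the core of such a client is quite small. The following is only a sketch of the general pattern (it is not the duck.sh shipped with IOTstack): a random delay followed by a single call to the DuckDNS update API, with the DOMAINS and DUCKDNS_TOKEN placeholders standing in for your own values:

``` bash
#!/usr/bin/env bash
# sketch only - illustrates the pattern used by a typical DuckDNS update client

DOMAINS="YOURS.duckdns.org"
DUCKDNS_TOKEN="YOUR_DUCKDNS_TOKEN"

# spread the load on the DuckDNS servers by waiting up to 60 seconds
sleep $((RANDOM % 60))

echo "$(date -R) - updating DuckDNS"
# an empty ip= parameter tells DuckDNS to use the address the request came from
curl -sS "https://www.duckdns.org/update?domains=${DOMAINS}&token=${DUCKDNS_TOKEN}&ip="
echo
```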

+

Standard output and standard error are redirected to /dev/null which is appropriate in this instance. When DuckDNS is working correctly (which is most of the time), the only output from the curl command is "OK". Logging that every five minutes would add wear and tear to SD cards for no real benefit.

+

If you suspect DuckDNS is misbehaving, you can run the duck.sh command from a terminal session, in which case you will see all the curl output in the terminal window.

+

If you wish to keep a log of duck.sh activity, the following will get the job done:

+
    +
  1. +

    Make a directory to hold log files:

    +
    $ mkdir -p ~/Logs
    +
    +
  2. +
  3. +

    Edit the last line of the crontab like this:

    +
    */5 *   *   *   *   duck.sh >>./Logs/duck.log 2>&1
    +
    +
  4. +
+

Remember to prune the log from time to time. The generally-accepted approach is:

+
$ cat /dev/null >~/Logs/duck.log
+
+

Virtual Private Network

+

WireGuard

+

WireGuard is supplied as part of IOTstack. See WireGuard documentation.

+

PiVPN

+

pimylifeup.com has an excellent tutorial on how to install PiVPN

+

In points 17 and 18 they mention using noip for their dynamic DNS. Here you can use your DuckDNS address if you created one.

+

Don't forget that you need to open port 1194 on your firewall. Most people won't be able to VPN from inside their network, so download an OpenVPN client for your mobile phone and try to connect over mobile data. (More info.)

+

Once you activate your VPN (from your phone/laptop/work computer) you will effectively be on your home network and you can access your devices as if you were on the wifi at home.

+

I personally use the VPN any time I'm on public WiFi, since it keeps all of your traffic secure.

+

Zerotier

+

https://www.zerotier.com/

+

Zerotier is an alternative to PiVPN that doesn't require port forwarding on your router. It does, however, require registering for their free-tier service here.

+

Kevin Zhang has written a how to guide here. Just note that the install link is outdated and should be:

+
$ curl -s 'https://raw.githubusercontent.com/zerotier/ZeroTierOne/master/doc/contact%40zerotier.com.gpg' | gpg --import && \
+if z=$(curl -s 'https://install.zerotier.com/' | gpg); then echo "$z" | sudo bash; fi
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Backup-and-Restore/index.html b/Basic_setup/Backup-and-Restore/index.html new file mode 100644 index 000000000..677e23da8 --- /dev/null +++ b/Basic_setup/Backup-and-Restore/index.html @@ -0,0 +1,2602 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Backing up and restoring IOTstack - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Backing up and restoring IOTstack

+

This page explains how to use the backup and restore functionality of IOTstack.

+

Backup

+

The backup command can be executed from IOTstack's menu, or from a cronjob.

+

Running backup

+

To ensure that all your data is saved correctly, the stack should be brought down before backing up. This is mainly because a running database may be mid-write, leaving the copied files in a state that could cause data loss.

+

There are 2 ways to run backups:

+
    +
  • From the menu: Backup and Restore > Run backup
  • +
  • Running the following command: bash ./scripts/backup.sh
  • +
+

The command that's run from the command line can also be executed from a cronjob:

+

0 2 * * * cd /home/pi/IOTstack && /bin/bash ./scripts/backup.sh

+

The current directory of bash must be IOTstack's directory, to ensure that it can find the relative paths of the files it's meant to back up. In the example above, it is assumed that IOTstack is inside the pi user's home directory.
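If you prefer to follow the earlier advice about bringing the stack down before backing up, a typical interactive sequence (a sketch, assuming IOTstack lives in ~/IOTstack) is:

``` console
$ cd ~/IOTstack
$ docker-compose down          # stop the stack so databases are quiescent
$ bash ./scripts/backup.sh     # run the backup
$ docker-compose up -d         # bring the stack back up
```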

+

Arguments

+
./scripts/backup.sh {TYPE=3} {USER=$(whoami)}
+
+
    +
  • Types:
  • +
  • 1 = Backup with Date
      +
    • A tarball file will be created that contains the date and time the backup was started, in the filename.
    • +
    +
  • +
  • 2 = Rolling Date
      +
    • A tarball file will be created that contains the day of the week (0-6) the backup was started, in the filename.
    • +
    • If a tarball already exists with the same name, it will be overwritten.
    • +
    +
  • +
  • 3 = Both
  • +
  • User: + This parameter only takes effect if the script is run as root; otherwise it defaults to the currently logged-in user. + If this parameter is not supplied when run as root, the script will ask for the username as input.
  • +
+

Backups:

+
    +
  • You can find the backups in the ./backups/ folder, with rolling backups in ./backups/rolling/ and date backups in ./backups/backup/.
  • +
  • Log files can also be found in the ./backups/logs/ directory.
  • +
+

Examples:

+
    +
  • ./scripts/backup.sh
  • +
  • ./scripts/backup.sh 3
  • +
+

Either of these will run both backups.

+
    +
  • ./scripts/backup.sh 2
  • +
+

This will only produce a backup in the rolling folder. It will be called 'backup_XX.tar.gz', where XX is the current day of the week (as an integer).

+
    +
  • sudo bash ./scripts/backup.sh 2 pi
  • +
+

This will only produce a backup in the rolling folder and change all the permissions to the 'pi' user.

+

Restore

+

There are 2 ways to run a restore:

+
    +
  • From the menu: Backup and Restore > Restore from backup
  • +
  • Running the following command: bash ./scripts/restore.sh
  • +
+

Important: The restore script assumes that the IOTstack directory is fresh, as if it was just cloned. If it is not fresh, errors may occur, or your data may not be restored correctly even if no errors are apparent.

+

Note: It is suggested that you test that your backups can be restored after initially setting up, and anytime you add or remove a service. Major updates to services can also break backups.

+

Arguments

+

./scripts/restore.sh {FILENAME=backup.tar.gz} {noask}
+
+The restore script takes 2 arguments:

+
    +
  • Filename: The name of the backup file. The file must be present in the ./backups/ directory, or a subfolder in it. That means it should either be moved from ./backups/backup to ./backups/, or you need to include the backup portion of the path when specifying the file (see the examples below).
  • +
  • NoAsk: If a second parameter is present, it acts as setting the noask flag to true.
  • +
+
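For example (the filenames below are placeholders; substitute the name of an actual tarball from your ./backups/ directory):

``` console
$ bash ./scripts/restore.sh backup/backup_2023-04-01_1030.tar.gz
$ bash ./scripts/restore.sh rolling/backup_2.tar.gz noask
```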

Pre and post script hooks

+

The script checks if there are any pre- and post-backup hooks to execute commands. Both of these files will be included in the backup, and have also been added to the .gitignore file, so that they will not be touched when IOTstack updates.

+

Prebackup script hook

+

The prebackup hook script is executed before any compression happens and before anything is written to the temporary backup manifest file (./.tmp/backup-list_{{NAME}}.txt). It can be used to prepare any services (such as databases that IOTstack isn't aware of) for backing up.

+

To use it, simply create a ./pre_backup.sh file in IOTstack's main directory. It will be executed the next time a backup runs.
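As an illustration only (the container name, database and paths are hypothetical), a pre-backup hook might dump a database that IOTstack isn't aware of, so the dump lands inside the backed-up volumes:

``` bash
#!/usr/bin/env bash
# ./pre_backup.sh - hypothetical example
# Dump a database from a custom container so the dump is captured by the backup.
mkdir -p ./volumes/my_custom_db/dumps
docker exec my_custom_db pg_dump -U postgres mydb > ./volumes/my_custom_db/dumps/mydb.sql
```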

+

Postbackup script hook

+

The postbackup hook script is executed after the tarball file has been written to disk, and before the final backup log information is written to disk.

+

To use it, simply create a ./post_backup.sh file in IOTstack's main directory. It will be executed after the next time a backup runs.

+

Post restore script hook

+

The post restore hook script is executed after all files have been extracted and written to disk. It can be used to apply permissions that your custom services may require.

+

To use it, simply create a ./post_restore.sh file in IOTstack's main directory. It will be executed after a restore happens.
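For example (again hypothetical; adjust the path and IDs to whatever your custom service expects), a post-restore hook could reinstate ownership on a restored volume:

``` bash
#!/usr/bin/env bash
# ./post_restore.sh - hypothetical example
# Re-apply the ownership a custom service expects on its restored volume.
sudo chown -R 1883:1883 ./volumes/my_custom_service
```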

+

Third party integration

+

This section explains how to back up your files with third-party software.

+

Dropbox

+

Coming soon.

+

Google Drive

+

Coming soon.

+

rsync

+

Coming soon.

+

Duplicati

+

Coming soon.

+

SFTP

+

Coming soon.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Custom/index.html b/Basic_setup/Custom/index.html new file mode 100644 index 000000000..3a518ce71 --- /dev/null +++ b/Basic_setup/Custom/index.html @@ -0,0 +1,2533 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Custom overrides - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Custom overrides

+

Each time you build the stack from the menu, the Docker Compose file +docker-compose.yml is recreated, losing any custom changes you've made. There +are different ways of dealing with this:

+
    +
  1. Not using the menu after you've made changes. Do remember to back up your + customized docker-compose.yml, in case you overwrite it by mistake or + out of habit from the menu.
  2. +
  3. Use Docker Compose's built-in override mechanism by creating a file named + docker-compose.override.yml. This limits you to changing values and + appending to lists already present in your docker-compose.yml, but it's + handy as changes are immediately picked up by docker-compose commands. To + see the resulting final config run docker-compose config. A small sketch is shown after this list.
  4. +
  5. The IOTstack menu, on the default master branch, implements a mechanism to + merge the YAML file compose-override.yml with the menu-generated stack + into docker-compose.yml. This can even be used to add completely new + services. See below for details.
  6. +
  7. This is not an actual extension mechanism, but well worth mentioning: If + you need a new service that doesn't communicate with the services in + IOTstack, create it completely separately and independently in its own + folder, e.g. ~/customStack/docker-compose.yml. This composition can then + be independently managed from that folder: cd ~/customStack and use + docker-compose commands as normal. The best override is the one you don't + have to make.
  8. +
+
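As a small sketch of the docker-compose.override.yml mechanism (the restart policy is just an example; the file is ordinary Compose YAML, shown here being created from the shell):

``` console
$ cat > ~/IOTstack/docker-compose.override.yml << 'EOF'
version: '3.6'
services:
  nodered:
    restart: always
EOF
$ cd ~/IOTstack && docker-compose config   # shows the merged result
```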

Custom services and overriding default settings for IOTstack

+

You can specify modifications to the docker-compose.yml file, including your own networks and custom containers/services.

+

Create a file called compose-override.yml in the main directory, and place your modifications into it. These changes will be merged into the docker-compose.yml file next time you run the build script.

+

The compose-override.yml file has been added to the .gitignore file, so it shouldn't be touched when upgrading IOTstack. It has been added to the backup script, and so will be included when you back up and restore IOTstack. Always test your backups though! New versions of IOTstack may break previous builds.

+

How it works

+
    +
  1. After the build process has been completed, a temporary docker compose file is created in the tmp directory.
  2. +
  3. The script then checks if compose-override.yml exists:
      +
    • If it exists, then continue to step 3
    • +
    • If it does not exist, copy the temporary docker compose file to the main directory and rename it to docker-compose.yml.
    • +
    +
  4. +
  5. Using the yaml_merge.py script, merge the compose-override.yml and the temporary docker compose file together: using the temporary file for the default values and iterating through each level of the YAML structure, the script checks whether compose-override.yml has a value set.
  6. +
  7. Output the final file to the main directory, calling it docker-compose.yml.
  8. +
+

A word of caution

+

If you specify an override for a service, and then rebuild the docker-compose.yml file, but deselect the service from the list, then the YAML merging will still produce that override.

+

For example, let's say NodeRed was selected and has the following override specified in compose-override.yml: +

services:
+  nodered:
+    restart: always
+

+

When rebuilding from the menu, ensure that the NodeRed service is always included, because if it's no longer included, the only values appearing in the final docker-compose.yml file for NodeRed will be the restart key and its value. Docker Compose will then error with the following message:

+

Service nodered has neither an image nor a build context specified. At least one must be provided.

+

When attempting to bring the services up with docker-compose up -d.

+

Either remove the override for NodeRed in compose-override.yml and rebuild the stack, or ensure that NodeRed is built with the stack to fix this.

+

Examples

+

Overriding default settings

+

Let's assume you put the following into the compose-override.yml file: +

services:
+  mosquitto:
+    ports:
+      - 1996:1996
+      - 9001:9001
+

+

Normally the mosquitto service would be built like this inside the docker-compose.yml file: +

version: '3.6'
+services:
+  mosquitto:
+    container_name: mosquitto
+    image: eclipse-mosquitto
+    restart: unless-stopped
+    user: "1883"
+    ports:
+      - 1883:1883
+      - 9001:9001
+    volumes:
+      - ./volumes/mosquitto/data:/mosquitto/data
+      - ./volumes/mosquitto/log:/mosquitto/log
+      - ./volumes/mosquitto/pwfile:/mosquitto/pwfile
+      - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf
+      - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl
+

+

Take special note of the ports list.

+

If you run the build script with the compose-override.yml file in place, and open up the final docker-compose.yml file, you will notice that the ports list has been replaced with the one you specified in the compose-override.yml file. +

version: '3.6'
+services:
+  mosquitto:
+    container_name: mosquitto
+    image: eclipse-mosquitto
+    restart: unless-stopped
+    user: "1883"
+    ports:
+      - 1996:1996
+      - 9001:9001
+    volumes:
+      - ./volumes/mosquitto/data:/mosquitto/data
+      - ./volumes/mosquitto/log:/mosquitto/log
+      - ./volumes/mosquitto/pwfile:/mosquitto/pwfile
+      - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf
+      - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl
+

+

Do note that it will replace the entire list. If you were to specify +

services:
+  mosquitto:
+    ports:
+      - 1996:1996
+

+

Then the final output will be: +

version: '3.6'
+services:
+  mosquitto:
+    container_name: mosquitto
+    image: eclipse-mosquitto
+    restart: unless-stopped
+    user: "1883"
+    ports:
+      - 1996:1996
+    volumes:
+      - ./volumes/mosquitto/data:/mosquitto/data
+      - ./volumes/mosquitto/log:/mosquitto/log
+      - ./volumes/mosquitto/pwfile:/mosquitto/pwfile
+      - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf
+      - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl
+

+

Using env files instead of docker-compose variables

+

If you need or prefer to use *.env files for docker-compose environment variables in a separate file instead of using overrides, you can do so like this:

+
services:
+  grafana:
+    env_file:
+      - ./services/grafana/grafana.env
+    environment:
+
+

This will remove the default environment variables set in the template, and tell docker-compose to use the variables specified in your file. It is not mandatory that the .env file be placed in the service's directory under ./services/, but it is strongly suggested. Keep in mind the PostBuild Script functionality can automatically copy your .env files into their directories on a successful build if you need it to.
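As a sketch only (the variable names are examples; Grafana recognises many GF_* settings and your timezone will differ), the referenced env file could be created like this:

``` console
$ mkdir -p ~/IOTstack/services/grafana
$ cat > ~/IOTstack/services/grafana/grafana.env << 'EOF'
TZ=Etc/UTC
GF_SECURITY_ADMIN_PASSWORD=changeme
EOF
```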

+

Adding custom services

+

Custom services can be added in a similar way to overriding default settings for standard services. Let's add a Minecraft and rcon server to IOTstack. +First, put the following into compose-override.yml: +

services:
+  mosquitto:
+    ports:
+      - 1996:1996
+      - 9001:9001
+  minecraft:
+    image: itzg/minecraft-server
+    ports:
+      - "25565:25565"
+    volumes:
+      - "./volumes/minecraft:/data"
+    environment:
+      EULA: "TRUE"
+      TYPE: "PAPER"
+      ENABLE_RCON: "true"
+      RCON_PASSWORD: "PASSWORD"
+      RCON_PORT: 28016
+      VERSION: "1.15.2"
+      REPLACE_ENV_VARIABLES: "TRUE"
+      ENV_VARIABLE_PREFIX: "CFG_"
+      CFG_DB_HOST: "http://localhost:3306"
+      CFG_DB_NAME: "IOTstack Minecraft"
+      CFG_DB_PASSWORD_FILE: "/run/secrets/db_password"
+    restart: unless-stopped
+  rcon:
+    image: itzg/rcon
+    ports:
+      - "4326:4326"
+      - "4327:4327"
+    volumes:
+      - "./volumes/rcon_data:/opt/rcon-web-admin/db"
+secrets:
+  db_password:
+    file: ./db_password
+

+

Then create the service directory that the new instance will use to store persistent data:

+

mkdir -p ./volumes/minecraft

+

and

+

mkdir -p ./volumes/rcon_data

+

Obviously you will need to give correct folder names depending on the volumes you specify for your custom services. If your new service doesn't require persistent storage, then you can skip this step.

+

Then simply run the ./menu.sh command, and rebuild the stack with whatever services you had before.

+

Using the Mosquitto example above, the final docker-compose.yml file will look like:

+
version: '3.6'
+services:
+  mosquitto:
+    ports:
+    - 1996:1996
+    - 9001:9001
+    container_name: mosquitto
+    image: eclipse-mosquitto
+    restart: unless-stopped
+    user: '1883'
+    volumes:
+    - ./volumes/mosquitto/data:/mosquitto/data
+    - ./volumes/mosquitto/log:/mosquitto/log
+    - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf
+    - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl
+  minecraft:
+    image: itzg/minecraft-server
+    ports:
+    - 25565:25565
+    volumes:
+    - ./volumes/minecraft:/data
+    environment:
+      EULA: 'TRUE'
+      TYPE: PAPER
+      ENABLE_RCON: 'true'
+      RCON_PASSWORD: PASSWORD
+      RCON_PORT: 28016
+      VERSION: 1.15.2
+      REPLACE_ENV_VARIABLES: 'TRUE'
+      ENV_VARIABLE_PREFIX: CFG_
+      CFG_DB_HOST: http://localhost:3306
+      CFG_DB_NAME: IOTstack Minecraft
+      CFG_DB_PASSWORD_FILE: /run/secrets/db_password
+    restart: unless-stopped
+  rcon:
+    image: itzg/rcon
+    ports:
+    - 4326:4326
+    - 4327:4327
+    volumes:
+    - ./volumes/rcon_data:/opt/rcon-web-admin/db
+secrets:
+  db_password:
+    file: ./db_password
+
+

Do note that the order of the YAML keys is not guaranteed.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Default-Configs/index.html b/Basic_setup/Default-Configs/index.html new file mode 100644 index 000000000..285440d5d --- /dev/null +++ b/Basic_setup/Default-Configs/index.html @@ -0,0 +1,2411 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Default ports - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Default ports

+

Here you can find a list of the default mode and ports used by each service found in the .templates directory.

+

This list can be generated by running the default_ports_md_generator.sh script.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Service NameModePort(s)
External:Internal
adguardhomenon-host53:53
8089:8089
3001:3000
adminernon-host9080:8080
blynk_servernon-host8180:8080
8440:8440
9443:9443
chronografnon-host8888:8888
dashmachinenon-host5000:5000
deconznon-host8090:80
443:443
5901:5900
diyhuenon-host8070:80
1900:1900
1982:1982
2100:2100
domoticznon-host8083:8080
6144:6144
1443:1443
dozzlenon-host8889:8080
duckdnshost
espruinohubhost
giteanon-host7920:3000
2222:22
grafananon-host3000:3000
heimdallnon-host8880:80
8883:443
home_assistanthost
homebridgehost
homernon-host8881:8080
influxdbnon-host8086:8086
influxdb2non-host8087:8086
kapacitornon-host9092:9092
mariadbnon-host3306:3306
mosquittonon-host1883:1883
"motioneye"non-host8765:8765
8081:8081
"n8n"non-host5678:5678
nextcloudnon-host9321:80
noderednon-host1880:1880
octoprintnon-host9980:80
openhabhost
piholenon-host8089:80
53:53
67:67
plexhost
portainer-cenon-host8000:8000
9000:9000
portainer-agentnon-host9001:9001
postgresnon-host5432:5432
prometheus-cadvisornon-host8082:8080
prometheus-nodeexporternon-host
prometheusnon-host9090:9090
pythonnon-host
qbittorrentnon-host6881:6881
15080:15080
1080:1080
ring-mqttnon-host8554:8554
55123:55123
rtl_433non-host
scryptedhost10443:10443
syncthinghost
tasmoadminnon-host8088:80
telegrafnon-host8092:8092
8094:8094
8125:8125
timescaledbnon-host
transmissionnon-host9091:9091
51413:51413
webthingsio_gatewayhost
wireguardnon-host51820:51820
zerotierhost
zerotierhost
zigbee2mqttnon-host8080:8080
zigbee2mqtt_assistantnon-host8880:80
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Docker-commands/index.html b/Basic_setup/Docker-commands/index.html new file mode 100644 index 000000000..d90ae9391 --- /dev/null +++ b/Basic_setup/Docker-commands/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Basic_setup/Docker/index.html b/Basic_setup/Docker/index.html new file mode 100644 index 000000000..45ed183f4 --- /dev/null +++ b/Basic_setup/Docker/index.html @@ -0,0 +1,2260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Docker - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Docker

+ +

Logging

+

When Docker starts a container, it executes its entrypoint command. Any +output produced by this command is logged by Docker. By default Docker stores +logs internally together with other data associated with the container image.

+

This has the effect that when recreating or updating a container, logs shown by +docker-compose logs won't show anything associated with the previous +instance. Use docker system prune to remove old instances and free up disk +space. Keeping logs only for the latest instance is helpful when testing, but +may not be desirable for production.

+

By default there is no limit on the log size. Surprisingly, when using an +SD card this is exactly what you want. If a runaway container floods the log +with output, writing will stop when the disk becomes full. Without a mechanism +to prevent such excessive writing, the SD card would keep being written to +until the flash hardware program-erase cycle limit is +reached, after which it is permanently broken.

+

When using a quality SSD-drive, potential flash-wear isn't usually a +concern. Then you can enable log-rotation by either:

+
    +
  1. +

    Configuring Docker to do it for you automatically. Edit your + docker-compose.yml and add a top-level x-logging and a logging: to + each service definition. The Docker compose reference documentation has + a good example.

    +
  2. +
  3. +

    Configuring Docker to log to the host system's journald.

    +

    PS: if /etc/docker/daemon.json doesn't exist, just create it (a sketch follows this list).

    +
  4. +
+
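A sketch of the journald option, assuming you want journald to become the default log driver for every container (restarting the Docker daemon is required for the change to take effect):

``` console
$ sudo tee /etc/docker/daemon.json > /dev/null << 'EOF'
{
  "log-driver": "journald"
}
EOF
$ sudo systemctl restart docker
$ journalctl -f CONTAINER_NAME=nodered   # then follow a single container's logs
```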

Aliases

+

Bash aliases for stopping and starting the stack and other common operations +are in the file .bash_aliases. To use them immediately and in future logins, +run in a console:

+
$ source ~/IOTstack/.bash_aliases
+$ echo "source ~/IOTstack/.bash_aliases" >> ~/.profile
+
+

These commands no longer need to be executed from the IOTstack directory and can be executed from any directory.

+
.bash_aliases
IOTSTACK_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+alias iotstack_up="cd "$IOTSTACK_HOME" && docker-compose up -d --remove-orphans"
+alias iotstack_down="cd "$IOTSTACK_HOME" && docker-compose down --remove-orphans"
+alias iotstack_start="cd "$IOTSTACK_HOME" && docker-compose start"
+alias iotstack_stop="cd "$IOTSTACK_HOME" && docker-compose stop"
+alias iotstack_pull="cd "$IOTSTACK_HOME" && docker-compose pull"
+alias iotstack_build="cd "$IOTSTACK_HOME" && docker-compose build --pull --no-cache"
+alias iotstack_update_docker_images='f(){ iotstack_pull "$@" && iotstack_build "$@" && iotstack_up --build "$@"; }; f'
+
+

You can now type iotstack_up. The aliases also accept additional parameters, +e.g. iotstack_stop portainer.

+

The iotstack_update_docker_images alias will update Docker images to the newest +released images, then build and recreate containers. Do note that using this will +result in broken containers from time to time, as upstream may release faulty +Docker images. Have proper backups, or be prepared to manually pin a previous +release build by editing docker-compose.yml.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/How-the-script-works/index.html b/Basic_setup/How-the-script-works/index.html new file mode 100644 index 000000000..19869f59b --- /dev/null +++ b/Basic_setup/How-the-script-works/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Basic_setup/Menu/index.html b/Basic_setup/Menu/index.html new file mode 100644 index 000000000..6e94b4efa --- /dev/null +++ b/Basic_setup/Menu/index.html @@ -0,0 +1,2381 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Menu - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Menu

+ +

The menu.sh script is used to create or modify the docker-compose.yml file. +This file defines how all containers added to the stack are configured.

+

Miscellaneous

+

log2ram

+

One of the drawbacks of an SD card is that it has a limited lifespan. One way +to reduce the load on the SD card is to move your log files to RAM. log2ram is a convenient tool to simply set this up. +It can be installed from the miscellaneous menu.

+

This only affects logs written to /var/log, and won't have any effect on Docker +logs or logs stored inside containers.

+

Dropbox-Uploader

+

This is a great utility to easily upload data from your Pi to the cloud. The +MagPi has an +excellent explanation of the process of setting up the Dropbox API. +Dropbox-Uploader is used in the backup script.

+

Backup and Restore

+

See Backing up and restoring IOTstack

+

Native Installs

+

RTL_433

+

RTL_433 can be installed from the "Native install" section of the menu.

+

This video demonstrates +how to use RTL_433

+

RPIEasy

+

The installer will install any dependencies. If ~/rpieasy exists it will +update the project to its latest version; if not, it will clone the project.

+

RPIEasy can be run by sudo ~/rpieasy/RPIEasy.py

+

To have RPIEasy start on boot, look in the web UI under hardware for "RPIEasy +autostart at boot".

+

RPIEasy will select its port as the first available one in the list +(80, 8080, 8008). If you run Hass.io there will be a conflict, so check the +next available port.

+

Old-menu branch details

+

The build script creates the ./services directory and populates it from the +template files in .templates. The script then appends the text within each +service.yml file to the docker-compose.yml. When the stack is rebuilt the menu +does not overwrite the service folder if it already exists. Make sure to sync +any alterations you have made to the docker-compose.yml file with the +respective service.yml so that on your next build your changes pull through.

+

The .gitignore file is set up such that if you do a git pull origin master it +does not overwrite the files you have already created. Because the build script +does not overwrite your service directory, any changes in the .templates +directory will have no effect on the services you have already made. You will +need to move your service folder out to get the latest version of the template.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Misc/index.html b/Basic_setup/Misc/index.html new file mode 100644 index 000000000..19869f59b --- /dev/null +++ b/Basic_setup/Misc/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Basic_setup/Native-RTL_433/index.html b/Basic_setup/Native-RTL_433/index.html new file mode 100644 index 000000000..19869f59b --- /dev/null +++ b/Basic_setup/Native-RTL_433/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Basic_setup/Networking/index.html b/Basic_setup/Networking/index.html new file mode 100644 index 000000000..400ca0163 --- /dev/null +++ b/Basic_setup/Networking/index.html @@ -0,0 +1,2235 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Networking - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Networking

+

The docker-compose instruction creates an internal network for the containers to communicate in; the ports are exposed on the Pi's IP address when you want to connect from outside. It also provides a "DNS" service, the name being the container name. It is important to note that when one container talks to another, they talk by name. All the container names are lowercase, like nodered, influxdb...
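You can see this name-based resolution in action from the host. As a quick sanity check (assuming the nodered and mosquitto containers are running, and that the image includes the ping utility, which not every image does):

``` console
$ docker exec nodered ping -c 2 mosquitto
```

If ping isn't available inside a given container, running the equivalent check from another container in the stack works just as well.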

+

An easy way to find out your IP is by typing ip address in the terminal and looking next to eth0 or wlan0 for your IP. It is highly recommended that you set a static IP for your Pi, or at least reserve an IP on your router, so that you always know it.

+

Check the docker-compose.yml to see which ports have been used

+
+

net

+
+

Examples

+
    +
  • You want to connect your nodered container to your MQTT server. In Node-RED, drop in an MQTT node; when you need to specify the address, type mosquitto.
  • +
  • You want to connect to your influxdb from grafana. You are in the Docker network and you need to use the name of the container. The address you specify in Grafana is http://influxdb:8086.
  • +
  • You want to connect to the web interface of grafana from your laptop. Now that you are outside the container environment, you type the Pi's IP, e.g. 192.168.n.m:3000.
  • +
+

Ports

+

Many containers try to use popular ports such as 80, 443 and 8080. For example, openHAB and Adminer both want to use port 8080 for their web interface. Adminer's port has been moved to 9080 to accommodate this. Please check the description of the container in the README to see if there are any changes, as they may not be the same as the port you are used to.

+

Port mapping is done in the docker-compose.yml file. Each service should have a section that reads like this: +

    ports:
+      - HOST_PORT:CONTAINER_PORT
+
+For adminer: +
    ports:
+      - 9080:8080
+
Port 9080 on the host Pi is mapped to port 8080 of the container. Therefore 127.0.0.1:8080 will take you to openHAB, whereas 127.0.0.1:9080 will take you to Adminer.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/RPIEasy_native/index.html b/Basic_setup/RPIEasy_native/index.html new file mode 100644 index 000000000..19869f59b --- /dev/null +++ b/Basic_setup/RPIEasy_native/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Basic_setup/Troubleshooting/index.html b/Basic_setup/Troubleshooting/index.html new file mode 100644 index 000000000..47b8ee9da --- /dev/null +++ b/Basic_setup/Troubleshooting/index.html @@ -0,0 +1,2593 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Troubleshooting - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Troubleshooting

+ +

Resources

+
    +
  • +

    Search github issues.

    +
      +
    • Closed issues or pull-requests may also have valuable hints.
    • +
    +
  • +
  • +

    Ask questions on IOTStack Discord. Or report + how you were able to fix a problem.

    +
  • +
  • +

    There are over 40 gists about IOTstack. These address a diverse range of + topics from small convenience scripts to complete guides. These are + individual contributions that aren't reviewed.

    +

    You can add your own keywords into the search: +https://gist.github.com/search?q=iotstack

    +
  • +
+

FAQ

+
+

Breaking update

+

A change made on 2022-01-18 requires manual steps, +or you may get an error like:
+ERROR: Service "influxdb" uses an undefined network "iotstack_nw"

+
+

Device Errors

+

If you are trying to run IOTstack on non-Raspberry Pi hardware, you will probably get the following error from docker-compose when you try to bring up your stack for the first time:

+
Error response from daemon: error gathering device information while adding custom device "/dev/ttyAMA0": no such file or directory
+
+
+

You will get a similar message about any device which is not known to your hardware.

+
+

The /dev/ttyAMA0 device is the Raspberry Pi's built-in serial port so it is guaranteed to exist on any "real" Raspberry Pi. As well as being referenced by containers that can actually use the serial port, ttyAMA0 is often employed as a placeholder.

+

Examples:

+
    +
  • Node-RED flows can use the node-red-node-serialport node to access the serial port. This is an example of "actual use";
  • +
  • +

    The Zigbee2MQTT container employs ttyAMA0 as a placeholder. This allows the container to start. Once you have worked out how your Zigbee adapter appears on your system, you will substitute your adapter's actual device path. For example:

    +
    - "/dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125B0028EEEEE0-if00:/dev/ttyACM0"
    +
    +
  • +
+

The simplest approach to solving "error gathering device information" problems is just to comment-out every device mapping that produces an error and, thereafter, treat the comments as documentation about what the container is expecting at run-time. For example, this is the devices list for Node-RED:

+
  devices:
+    - "/dev/ttyAMA0:/dev/ttyAMA0"
+    - "/dev/vcio:/dev/vcio"
+    - "/dev/gpiomem:/dev/gpiomem"
+
+

Those are, in turn, the Raspberry Pi's:

+
    +
  • serial port
  • +
  • videoCore multimedia processor
  • +
  • mechanism for accessing GPIO pin headers
  • +
+

If none of those is available on your chosen platform (the usual situation on non-Pi hardware), commenting-out the entire block is appropriate:

+
# devices:
+#   - "/dev/ttyAMA0:/dev/ttyAMA0"
+#   - "/dev/vcio:/dev/vcio"
+#   - "/dev/gpiomem:/dev/gpiomem"
+
+

You interpret each line in a device map like this:

+
    - "«external»:«internal»"
+
+

The «external» device is what the platform (operating system plus hardware) sees. The «internal» device is what the container sees. Although it is reasonably common for the two sides to be the same, this is not a requirement. It is usual to replace the «external» device with the actual device while leaving the «internal» device unchanged.

+

Here is an example. On macOS, a CP2102 USB-to-Serial adapter shows up as:

+
/dev/cu.SLAB_USBtoUART
+
+

Assume you are running the Node-RED container in macOS Docker Desktop, and that you want a flow to communicate with the CP2102. You would change the service definition like this:

+
  devices:
+    - "/dev/cu.SLAB_USBtoUART:/dev/ttyAMA0"
+#   - "/dev/vcio:/dev/vcio"
+#   - "/dev/gpiomem:/dev/gpiomem"
+
+

In other words, the «external» (real world) device cu.SLAB_USBtoUART is mapped to the «internal» (container) device ttyAMA0. The flow running in the container is expecting to communicate with ttyAMA0 and is none-the-wiser.

+

Needing to use sudo to run docker commands

+

You should never (repeat never) use sudo to run docker or docker compose commands. Forcing docker to do something with sudo almost always creates more problems than it solves. Please see What is sudo? to understand how sudo actually works.

+

If docker or docker-compose commands seem to need elevated privileges, the most likely explanation is incorrect group membership. Please read the next section about errors involving docker.sock. The solution (two usermod commands) is the same.

+

If, however, the current user is a member of the docker group but you still get error responses that seem to imply a need for sudo, it implies that something fundamental is broken. Rather than resorting to sudo, you are better advised to rebuild your system.

+

Errors involving docker.sock

+

If you encounter permission errors that mention /var/run/docker.sock, the most likely explanation is the current user (usually "pi") not being a member of the "docker" group.

+

You can check membership with the groups command:

+
$ groups
+pi adm dialout cdrom sudo audio video plugdev games users input render netdev bluetooth lpadmin docker gpio i2c spi
+
+

In that list, you should expect to see both bluetooth and docker. If you do not, you can fix the problem like this:

+
$ sudo usermod -G docker -a $USER
+$ sudo usermod -G bluetooth -a $USER
+$ exit
+
+

The exit statement is required. You must logout and login again for the two usermod commands to take effect. An alternative is to reboot.

+

System freezes or SSD problems

+

You should read this section if you experience any of the following problems:

+
    +
  • Apparent system hangs, particularly if Docker containers were running at the time the system was shut down or rebooted;
  • +
  • Much slower than expected performance when reading/writing your SSD; or
  • +
  • Suspected data-corruption on your SSD.
  • +
+

Try a USB2 port

+

Start by shutting down your Pi and moving your SSD to one of the USB2 ports. The slower speed will often alleviate the problem.

+

Tips:

+
    +
  1. +

    If you don't have sufficient control to issue a shutdown and/or your Pi won't shut down cleanly:

    +
      +
    • remove power
    • +
    • move the SSD to a USB2 port
    • +
    • apply power again.
    • +
    +
  2. +
  3. +

    If you run "headless" and find that the Pi responds to pings but you can't connect via SSH:

    +
      +
    • remove power
    • +
    • connect the SSD to a support platform (Linux, macOS, Windows)
    • +
    • create a file named "ssh" at the top level of the boot partition
    • +
    • eject the SSD from your support platform
    • +
    • connect the SSD to a USB2 port on your Pi
    • +
    • apply power again.
    • +
    +
  4. +
+

Check the dhcpcd patch

+

Next, verify that the dhcpcd patch is installed. There seems to be a timing component to the deadlock which is why it can be alleviated, to some extent, by switching the SSD to a USB2 port.

+

If the dhcpcd patch was not installed but you have just installed it, try returning the SSD to a USB3 port.

+

Try a quirks string

+

If problems persist even when the dhcpcd patch is in place, you may have an SSD which isn't up to the Raspberry Pi's expectations. Try the following:

+
    +
  1. If your IOTstack is running, take it down.
  2. +
  3. If your SSD is attached to a USB3 port, shut down your Pi, move the SSD to a USB2 port, and apply power.
  4. +
  5. +

    Run the following command:

    +
    $ dmesg | grep "\] usb [[:digit:]]-"
    +
    +

    In the output, identify your SSD. Example:

    +
    [    1.814248] usb 2-1: new SuperSpeed Gen 1 USB device number 2 using xhci_hcd
    +[    1.847688] usb 2-1: New USB device found, idVendor=f0a1, idProduct=f1b2, bcdDevice= 1.00
    +[    1.847708] usb 2-1: New USB device strings: Mfr=99, Product=88, SerialNumber=77
    +[    1.847723] usb 2-1: Product: Blazing Fast SSD
    +[    1.847736] usb 2-1: Manufacturer: Suspect Drives
    +
    +

    In the above output, the second line contains the Vendor and Product codes that you need:

    +
      +
    • idVendor=f0a1
    • +
    • idProduct=f1b2
    • +
    +
  6. +
  7. +

    Substitute the values of «idVendor» and «idProduct» into the following command template:

    +
    sed -i.bak '1s/^/usb-storage.quirks=«idVendor»:«idProduct»:u /' "$CMDLINE"
    +
    +

    This is known as a "quirks string". Given the dmesg output above, the string would be:

    +
    sed -i.bak '1s/^/usb-storage.quirks=f0a1:f1b2:u /' "$CMDLINE"
    +
    +

    Make sure that you keep the space between the :u and /'. You risk breaking your system if that space is not there.

    +
  8. +
  9. +

    Run these commands - the second line is the one you prepared in step 4 using sudo:

    +
    $ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt"
    +$ sudo sed -i.bak '1s/^/usb-storage.quirks=f0a1:f1b2:u /' "$CMDLINE"
    +
    +

    The command:

    +
      +
    • makes a backup copy of cmdline.txt as cmdline.txt.bak
    • +
    • inserts the quirks string at the start of cmdline.txt.
    • +
    +

    You can confirm the result as follows:

    +
      +
    • +

      display the original (baseline reference):

      +
      $ cat "$CMDLINE.bak"
      +console=serial0,115200 console=tty1 root=PARTUUID=06c69364-02 rootfstype=ext4 fsck.repair=yes rootwait quiet splash plymouth.ignore-serial-consoles
      +
      +
    • +
    • +

      display the modified version:

      +
      $ cat "$CMDLINE"
      +usb-storage.quirks=f0a1:f1b2:u console=serial0,115200 console=tty1 root=PARTUUID=06c69364-02 rootfstype=ext4 fsck.repair=yes rootwait quiet splash plymouth.ignore-serial-consoles
      +
      +
    • +
    +
  10. +
  11. +

    Shutdown your Pi.

    +
  12. +
  13. Connect your SSD to a USB3 port and apply power.
  14. +
+

There is more information about this problem on the Raspberry Pi forum.

+

Getting a clean slate

+

If you create a mess and can't see how to recover, try proceeding like this:

+
$ cd ~/IOTstack
+$ docker-compose down
+$ cd
+$ mv IOTstack IOTstack.old
+$ git clone https://github.com/SensorsIot/IOTstack.git IOTstack
+
+

In words:

+
    +
  1. Be in the right directory.
  2. +
  3. Take the stack down.
  4. +
  5. The cd command without any arguments changes your working directory to + your home directory (variously known as ~ or $HOME or /home/pi).
  6. +
  7. +

    Move your existing IOTstack directory out of the way. If you get a + permissions problem:

    + +
  8. +
  9. +

    Check out a clean copy of IOTstack.

    +
  10. +
+

Now, you have a clean slate and can start afresh by running the menu:

+
$ cd ~/IOTstack
+$ ./menu.sh
+
+

The IOTstack.old directory remains available as a reference for as long as you need it. Once you have no further use for it, you can clean it up via:

+
$ cd
+$ sudo rm -rf ./IOTstack.old # (1)
+
+
    +
  1. The sudo command is needed in this situation because some files and folders (eg the "volumes" directory and most of its contents) are owned by root.
  2. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/Understanding-Containers/index.html b/Basic_setup/Understanding-Containers/index.html new file mode 100644 index 000000000..4690cb9d6 --- /dev/null +++ b/Basic_setup/Understanding-Containers/index.html @@ -0,0 +1,2434 @@ + + + + + + + + + + + + + + + + + + + + + + + + + What is Docker? - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

What is Docker?

+

In simple terms, Docker is a software platform that simplifies the process of building, running, +managing and distributing applications. It does this by virtualizing the operating system of the +computer on which it is installed and running.

+

The Problem

+

Let’s say you have three different Python-based applications that you plan to host on a single server +(which could either be a physical or a virtual machine).

+

Each of these applications makes use of a different version of Python, and the associated libraries and dependencies also differ from one application to another.

+

Since we cannot have different versions of Python installed on the same machine, this prevents us from +hosting all three applications on the same computer.

+

The Solution

+

Let’s look at how we could solve this problem without making use of Docker. In such a scenario, we +could solve this problem either by having three physical machines, or a single physical machine, which +is powerful enough to host and run three virtual machines on it.

+

Both the options would allow us to install different versions of Python on each of these machines, +along with their associated dependencies.

+

The machine on which Docker is installed and running is usually referred to as a Docker Host or Host in +simple terms. So, whenever you plan to deploy an application on the host, it would create a logical +entity on it to host that application. In Docker terminology, we call this logical entity a Container or +Docker Container to be more precise.

+

The kernel of the host's operating system, however, is shared across all the containers that are running on it.

+

This allows each container to be isolated from the others present on the same host. Thus it allows multiple containers with different application requirements and dependencies to run on the same host, as long as they have the same operating system requirements.

+

Docker Terminology

+

Docker Images and Docker Containers are the two essential things that you will come across daily while +working with Docker.

+

In simple terms, a Docker Image is a template that contains the application, and all the dependencies +required to run that application on Docker.

+

On the other hand, as stated earlier, a Docker Container is a logical entity. In more precise terms, +it is a running instance of the Docker Image.

+
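
You can see this distinction on a running system: docker images lists the images (the templates), while docker ps lists the containers (the running instances):

$ docker images
$ docker ps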

What is Docker-Compose?

+

Docker Compose provides a way to orchestrate multiple containers that work together. Docker Compose is a simple yet powerful tool that is used to run multiple containers as a single service. For example, suppose you have an application which requires MQTT as a communication service between IoT devices and an OpenHAB instance as a smart-home application service. With docker-compose you can create one single file (docker-compose.yml) which creates both containers as a single service without starting each separately. It wires up the networks (literally), mounts all volumes and exposes the ports.

+
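
As an illustration only (the image names, ports and other details here are examples; the IOTstack menu generates the real file for you), a two-container Compose file might look like this:

services:
  mosquitto:
    image: eclipse-mosquitto
    restart: unless-stopped
    ports:
      - "1883:1883"

  openhab:
    image: openhab/openhab
    restart: unless-stopped
    depends_on:
      - mosquitto

A single docker-compose up -d then creates both containers, the shared network and any volume mappings in one step.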

IOTstack, with its templates and menu, is a generator for that docker-compose service descriptor.

+

How Docker Compose Works?

+

  • uses YAML files to configure application services (docker-compose.yml)
  • can start all the services with a single command (docker-compose up)
  • can stop all the services with a single command (docker-compose down)

+

How are the containers connected

+

The containers are automagically connected when we run the stack with docker-compose up. By default, the containers share the same logical network, where each instance can reach the others by its logical name. That means if there is an instance called mosquitto and one called openhab, when the openHAB instance needs to access MQTT, the host name mosquitto will resolve to the running mosquitto instance.

+
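
You can see this name resolution in action. Assuming containers named openhab and mosquitto are both running, and that the openHAB image includes the ping utility (an assumption, not a guarantee), the following command should receive replies:

$ docker exec openhab ping -c 2 mosquitto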

How the containers are connected to the host machine

+

Volumes

+

Containers are enclosed processes whose state is lost when the container restarts. To persist state, volumes (images or directories) can be used to share data with the host. This means that if you need to persist a database, configuration or any other state, you have to bind volumes, and the service running inside the container writes its files to the bound volume. In order to understand what a Docker volume is, we first need to be clear about how the filesystem normally works in Docker. Docker images are stored as a series of read-only layers. When we start a container, Docker takes the read-only image and adds a read-write layer on top. If the running container modifies an existing file, the file is copied out of the underlying read-only layer and into the top-most read-write layer where the changes are applied. The version in the read-write layer hides the underlying file, but does not destroy it -- it still exists in the underlying layer. When a Docker container is deleted, relaunching the image will start a fresh container without any of the changes made in the previously running container -- those changes are lost. That is the reason configurations and databases are not persisted unless volumes are used.

+

Volumes are the preferred mechanism for persisting data generated by and used by Docker containers. While bind mounts are dependent on the directory structure of the host machine, volumes are completely managed by Docker. The IOTstack project generally uses the volumes directory to bind these container volumes.

+

Ports

+

When containers are running, we usually want to expose some of their services to the outside world; for example, the OpenHAB web frontend has to be accessible to users. There are several ways to achieve that. One is mapping the port to the host machine, which is called port binding. In that case the service has a dedicated port which can be accessed, but one drawback is that a host port can be used by one service only. Another way is a reverse proxy. The term reverse proxy (or load balancer in some terminology) is normally applied to a service that sits in front of one or more servers (in our case containers), accepting requests from clients for resources located on the server(s). From the client's point of view, the reverse proxy appears to be the web server and so is totally transparent to the remote user. This means several services can share the same port: the proxy routes each request by its URL (virtual domain or context path). For example, with Grafana and openHAB instances, a request for openhab.domain.tld will be routed to the openHAB instance's port 8181 while grafana.domain.tld goes to the Grafana instance's port 3000. In that case the proxy has to be mapped to host port 80 and/or 443 on the host machine, and the proxy server accesses the containers via the Docker virtual network.

+
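
For example, this is what a simple port binding looks like in a service definition (an illustrative fragment; the definitions generated by the IOTstack menu may differ):

  grafana:
    ports:
      - "3000:3000"

The left-hand number is the port on the host, the right-hand number is the port inside the container. A reverse proxy, by contrast, claims host ports 80/443 itself and forwards each request to the appropriate container's internal port over the Docker network.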

Source materials used:

+

  • https://takacsmark.com/docker-compose-tutorial-beginners-by-example-basics/
  • https://www.freecodecamp.org/news/docker-simplified-96639a35ff36/
  • https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy/
  • https://blog.container-solutions.com/understanding-volumes-docker

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/What-is-sudo/index.html b/Basic_setup/What-is-sudo/index.html new file mode 100644 index 000000000..c532b76a1 --- /dev/null +++ b/Basic_setup/What-is-sudo/index.html @@ -0,0 +1,2232 @@ + + + + + + + + + + + + + + + + + + + + + + + + + What is sudo? - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

What is sudo?

+

Many first-time users of IOTstack get into difficulty by misusing the sudo command. The problem is best understood by example. In the following, you would expect ~ (tilde) to expand to /home/pi. It does:

+
$ echo ~/IOTstack
+/home/pi/IOTstack
+
+

The command below sends the same echo command to bash for execution. This is what happens when you type the name of a shell script. You get a new instance of bash to run the script:

+
$ bash -c 'echo ~/IOTstack'
+/home/pi/IOTstack
+
+

Same answer. Again, this is what you expect. But now try it with sudo on the front:

+
$ sudo bash -c 'echo ~/IOTstack'
+/root/IOTstack
+
+

Different answer. It is different because sudo means "become root, and then run the command". The process of becoming root changes the home directory, and that changes the definition of ~.

+

Any script designed for working with IOTstack assumes ~ (or the equivalent $HOME variable) expands to /home/pi. That assumption is invalidated if the script is run by sudo.

+

Of necessity, any script designed for working with IOTstack will have to invoke sudo inside the script when it is required. You do not need to second-guess the script's designer.

+

Please try to minimise your use of sudo when you are working with IOTstack. Here are some rules of thumb:

+
    +
  1. +

    Is what you are about to run a script? If yes, check whether the script already contains sudo commands. Using menu.sh as the example:

    +
    $ grep -c 'sudo' ~/IOTstack/menu.sh
    +28
    +
    +

    There are numerous uses of sudo within menu.sh. That means the designer thought about when sudo was needed.

    +
  2. +
  3. +

    Did the command you just executed work without sudo? Note the emphasis on the past tense. If yes, then your work is done. If no, and the error suggests elevated privileges are necessary, then re-execute the last command like this:

    +
    $ sudo !!
    +
    +
  4. +
+

It takes time, patience and practice to learn when sudo is actually needed. Over-using sudo out of habit, or because you were following a bad example you found on the web, is a very good way to find that you have created so many problems for yourself that you will need to reinstall your IOTstack. Please err on the side of caution!

+

Configuration

+

To edit sudo functionality and permissions use: sudo visudo

+

For instance, to allow sudo usage without prompting for a password: +

# Allow members of group sudo to execute any command without password prompt
+%sudo   ALL=(ALL:ALL) NOPASSWD:ALL
+

+

For more information: man sudoers

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Basic_setup/index.html b/Basic_setup/index.html new file mode 100644 index 000000000..45609d037 --- /dev/null +++ b/Basic_setup/index.html @@ -0,0 +1,3370 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Getting Started - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Getting Started

+

About IOTstack

+

IOTstack is not a system. It is a set of conventions for assembling arbitrary collections of containers into something that has a reasonable chance of working out-of-the-box. The three most important conventions are:

+
    +
  1. +

    If a container needs information to persist across restarts (and most containers do) then the container's persistent store will be found at:

    +
    ~/IOTstack/volumes/«container»
    +
    +

    Most service definition examples found on the web have a scattergun approach to this problem. IOTstack imposes order on this chaos.

    +
  2. +
  3. +

    To the maximum extent possible, network port conflicts have been sorted out in advance.

    +

    Sometimes this is not possible. For example, Pi-hole and AdGuardHome both offer Domain Name System services. The DNS relies on port 53. You can't have two containers claiming port 53, so the only way to avoid this is to pick either Pi-hole or AdGuardHome.

  3. Where multiple containers are needed to implement a single user-facing service, the IOTstack service definition will include everything needed. A good example is NextCloud, which relies on MariaDB. IOTstack implements MariaDB as a private instance which is only available to NextCloud. This strategy ensures that you are able to run your own separate MariaDB container without any risk of interference with your NextCloud service.

    +
  4. +
+

Requirements

+

IOTstack makes the following assumptions:

+
    +
  1. +

    Your hardware is capable of running Debian or one of its derivatives. Examples that are known to work include:

    +
      +
    • +

      a Raspberry Pi (typically a 3B+ or 4B)

      +
      +

      The Raspberry Pi Zero W2 has been tested with IOTstack. It works but the 512MB RAM means you should not try to run too many containers concurrently.

      +
      +
    • +
    • +

      Orange Pi Win/Plus see also issue 375

      +
    • +
    • an Intel-based Mac running macOS plus Parallels with a Debian guest.
    • +
    • an Intel-based platform running Proxmox with a Debian guest.
    • +
    +
  2. +
  3. +

    Your host or guest system is running a reasonably-recent version of Debian or an operating system which is downstream of Debian in the Linux family tree, such as Raspberry Pi OS (aka "Raspbian") or Ubuntu.

    +

    IOTstack is known to work in 32-bit mode but not all containers have images on DockerHub that support 32-bit mode. If you are setting up a new system from scratch, you should choose a 64-bit option.

    +

    IOTstack was known to work with Buster but it has not been tested recently. Bullseye is known to work but if you are setting up a new system from scratch, you should choose Bookworm.

    +

    Please don't waste your own time trying Linux distributions from outside the Debian family tree. They are unlikely to work.

    +
  4. +
  5. +

    You are logged-in as the default user (ie not root). In most cases, this is the user with ID=1000 and is what you get by default on either a Raspberry Pi OS or Debian installation.

    +

    This assumption is not really an IOTstack requirement as such. However, many containers assume UID=1000 exists and you are less likely to encounter issues if this assumption holds.

    +
  6. +
+

Please don't read these assumptions as saying that IOTstack will not run on other hardware, other operating systems, or as a different user. It is just that IOTstack gets most of its testing under these conditions. The further you get from these implicit assumptions, the more your mileage may vary.

+

New installation

+

You have two choices:

+
    +
  1. If you have an existing system and you want to add IOTstack to it, then the add-on method is your best choice.
  2. +
  3. If you are setting up a new system from scratch, then PiBuilder is probably your best choice. You can, however, also use the add-on method in a green-fields installation.
  4. +
+

add-on method

+

This method assumes an existing system rather than a green-fields installation. The script uses the principle of least interference. It only installs the bare minimum of prerequisites and, with the exception of adding some boot time options to your Raspberry Pi (but not any other kind of hardware), makes no attempt to tailor your system.

+

To use this method:

+
    +
  1. +

    Install curl:

    +
    $ sudo apt install -y curl
    +
    +
  2. +
  3. +

    Run the following command:

    +
    $ curl -fsSL https://raw.githubusercontent.com/SensorsIot/IOTstack/master/install.sh | bash
    +
    +
  4. +
+

The install.sh script is designed to be run multiple times. If the script discovers a problem, it will explain how to fix that problem and, assuming you follow the instructions, you can safely re-run the script. You can repeat this process until the script completes normally.

+

PiBuilder method

+

Compared with the add-on method, PiBuilder is far more comprehensive. PiBuilder:

+
    +
  1. Does everything the add-on method does.
  2. +
  3. Adds support packages and debugging tools that have proven useful in the IOTstack context.
  4. +
  5. Installs all required system patches (see next section).
  6. +
  7. +

    In addition to cloning IOTstack (this repository), PiBuilder also clones:

    +
      +
    • IOTstackBackup which is an alternative to the backup script supplied with IOTstack but does not require your stack to be taken down to perform backups; and
    • +
    • IOTstackAliases which provides shortcuts for common IOTstack operations.
    • +
    +
  8. +
  9. +

    Performs extra tailoring intended to deliver a rock-solid platform for IOTstack.

    +
  10. +
+

PiBuilder does, however, assume a green fields system rather than an existing installation. Although the PiBuilder scripts will probably work on an existing system, that scenario has never been tested so it's entirely at your own risk.

+

PiBuilder actually has two specific use-cases:

+
    +
  1. A first-time build of a system to run IOTstack; and
  2. +
  3. The ability to create your own customised version of PiBuilder so that you can quickly rebuild your Raspberry Pi or Proxmox guest after a disaster. Combined with IOTstackBackup, you can go from bare metal to a running system with data restored in about half an hour.
  4. +
+

Required system patches

+

You can skip this section if you used PiBuilder to construct your system. That's because PiBuilder installs all necessary patches automatically.

+

If you used the add-on method, you should consider applying these patches by hand. Unless you know that a patch is not required, assume that it is needed.

+

patch 1 – restrict DHCP

+

Run the following commands:

+
$ sudo bash -c '[ $(egrep -c "^allowinterfaces eth\*,wlan\*" /etc/dhcpcd.conf) -eq 0 ] && echo "allowinterfaces eth*,wlan*" >> /etc/dhcpcd.conf'
+
+

This patch prevents the dhcpcd daemon from trying to allocate IP addresses to Docker's docker0 and veth interfaces. Docker assigns the IP addresses itself and dhcpcd trying to get in on the act can lead to a deadlock condition which can freeze your Pi.

+

See Issue 219 and Issue 253 for more information.

+

patch 2 – update libseccomp2

+

This patch is ONLY for Raspbian Buster. Do NOT install this patch if you are running Raspbian Bullseye or Bookworm.

+
    +
  1. +

    check your OS release

    +

    Run the following command:

    +
    $ grep "PRETTY_NAME" /etc/os-release
    +PRETTY_NAME="Raspbian GNU/Linux 10 (buster)"
    +
    +

    If you see the word "buster", proceed to step 2. Otherwise, skip this patch.

    +
  2. +
  3. +

    if you are indeed running "buster"

    +

    Without this patch on Buster, Docker images will fail if:

    +
      +
    • the image is based on Alpine and the image's maintainer updates to Alpine 3.13; and/or
    • +
    • an image's maintainer updates to a library that depends on 64-bit values for Unix epoch time (the so-called Y2038 problem).
    • +
    +

    To install the patch:

    +
    $ sudo apt-key adv --keyserver hkps://keyserver.ubuntu.com:443 --recv-keys 04EE7237B7D453EC 648ACFD622F3D138
    +$ echo "deb http://httpredir.debian.org/debian buster-backports main contrib non-free" | sudo tee -a "/etc/apt/sources.list.d/debian-backports.list"
    +$ sudo apt update
    +$ sudo apt install libseccomp2 -t buster-backports
    +
    +
  4. +
+

patch 3 - kernel control groups

+

Kernel control groups need to be enabled in order to monitor container specific +usage. This makes commands like docker stats fully work. Also needed for full +monitoring of docker resource usage by the telegraf container.

+

Enable by running (takes effect after reboot):

+
$ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt"
+$ echo $(cat "$CMDLINE") cgroup_memory=1 cgroup_enable=memory | sudo tee "$CMDLINE"
+$ sudo reboot
+
+

the IOTstack menu

+

The menu is used to construct your docker-compose.yml file. That file is read by docker-compose which issues the instructions necessary for starting your stack.

+

The menu is a great way to get started quickly but it is only an aid. It is a good idea to learn the various docker and docker-compose commands so you can use them outside the menu. It is also a good idea to study the docker-compose.yml generated by the menu to see how everything is put together. You will gain a lot of flexibility if you learn how to add containers by hand.

+

In essence, the menu is a concatenation tool which appends service definitions that exist inside the hidden ~/IOTstack/.templates folder to your docker-compose.yml.

+
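
You can see the raw material the menu works with by listing that folder (the exact contents depend on the IOTstack version you have checked out):

$ ls ~/IOTstack/.templates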

Once you understand what the menu does (and, more importantly, what it doesn't do), you will realise that the real power of IOTstack lies not in its menu system but resides in its conventions.

+

menu item: Build Stack

+

To create your first docker-compose.yml:

+
$ cd ~/IOTstack
+$ ./menu.sh
+Select "Build Stack"
+
+

Follow the on-screen prompts and select the containers you need.

+
+

The best advice we can give is "start small". Limit yourself to the core containers you actually need (eg Mosquitto, Node-RED, InfluxDB, Grafana, Portainer). You can always add more containers later. Some users have gone overboard with their initial selections and have run into what seem to be Raspberry Pi OS limitations.

+
+

Key point:

+
    +
  • If you are running "new menu" (master branch) and you select Node-RED, you must press the right-arrow and choose at least one add-on node. If you skip this step, Node-RED will not build properly.
  • +
  • Old menu forces you to choose add-on nodes for Node-RED.
  • +
+

The process finishes by asking you to bring up the stack:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

The first time you run up the stack docker will download all the images from DockerHub. How long this takes will depend on how many containers you selected and the speed of your internet connection.

+

Some containers also need to be built locally. Node-RED is an example. Depending on the Node-RED nodes you select, building the image can also take a very long time. This is especially true if you select the SQLite node.

+

Be patient (and, if you selected the SQLite node, ignore the huge number of warnings).

+ +

The commands in this menu execute shell scripts in the root of the project.

+

other menu items

+

The old and new menus differ in the options they offer. You should come back and explore them once your stack is built and running.

+

useful commands: docker & docker-compose

+

Handy rules:

+
    +
  • docker commands can be executed from anywhere, but
  • +
  • docker-compose commands need to be executed from within ~/IOTstack
  • +
+

starting your IOTstack

+

To start the stack:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

Once the stack has been brought up, it will stay up until you take it down. This includes shutdowns and reboots of your Raspberry Pi. If you do not want the stack to start automatically after a reboot, you need to stop the stack before you issue the reboot command.

+
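
For example, to reboot without the stack coming back up automatically:

$ cd ~/IOTstack
$ docker-compose down
$ sudo reboot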

logging journald errors

+

If you get a docker logging error like:

+
Cannot create container for service [service name here]: unknown log opt 'max-file' for journald log driver
+
+
    +
  1. +

    Run the command:

    +
    $ sudo nano /etc/docker/daemon.json
    +
    +
  2. +
  3. +

    change:

    +
    "log-driver": "journald",
    +
    +

    to:

    +
    "log-driver": "json-file",
    +
    +
  4. +
+

Logging limits were added to prevent Docker using up lots of RAM if log2ram is enabled, or SD cards being filled with log data and degraded from unnecessary IO. See Docker Logging configurations

+

You can also turn logging off or set it to use another option for any service by using the IOTstack docker-compose-override.yml file mentioned at IOTstack/Custom.

+
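
As a sketch (the service name and values are examples only), a per-service logging override looks like this:

  nodered:
    logging:
      driver: json-file
      options:
        max-size: "1m"
        max-file: "3"

The same stanza can also select driver: none to switch logging off for a particularly noisy container.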

Another approach is to change daemon.json to be like this:

+
{
+  "log-driver": "local",
+  "log-opts": {
+    "max-size": "1m"
+  }
+}
+
+

The local driver is specifically designed to prevent disk exhaustion. Limiting log size to one megabyte also helps, particularly if you only have a limited amount of storage.

+

If you are familiar with system logging where it is best practice to retain logs spanning days or weeks, you may feel that one megabyte is unreasonably small. However, before you rush to increase the limit, consider that each container is the equivalent of a small computer dedicated to a single task. By their very nature, containers tend to either work as expected or fail outright. That, in turn, means that it is usually only recent container logs showing failures as they happen that are actually useful for diagnosing problems.

+

starting an individual container

+

To start a particular container:

+
$ cd ~/IOTstack
+$ docker-compose up -d «container»
+
+

stopping your IOTstack

+

Stopping aka "downing" the stack stops and deletes all containers, and removes the internal network:

+
$ cd ~/IOTstack
+$ docker-compose down
+
+

To stop the stack without removing containers, run:

+
$ cd ~/IOTstack
+$ docker-compose stop
+
+

stopping an individual container

+

stop can also be used to stop individual containers, like this:

+
$ cd ~/IOTstack
+$ docker-compose stop «container»
+
+

This puts the container in a kind of suspended animation. You can resume the container with

+
$ cd ~/IOTstack
+$ docker-compose start «container»
+
+

You can also down a container:

+
$ cd ~/IOTstack
+$ docker-compose down «container»
+
+

Note:

+
    +
  • +

    If the down command returns an error suggesting that you can't use it to down a container, it actually means that you have an obsolete version of docker-compose. You should upgrade your system. The workaround is to use the old syntax:

    +
    $ cd ~/IOTstack
    +$ docker-compose rm --force --stop -v «container»
    +
    +
  • +
+

To reactivate a container which has been stopped and removed:

+
$ cd ~/IOTstack
+$ docker-compose up -d «container»
+
+

checking container status

+

You can check the status of containers with:

+
$ docker ps
+
+

or

+
$ cd ~/IOTstack
+$ docker-compose ps
+
+

viewing container logs

+

You can inspect the logs of most containers like this:

+
$ docker logs «container»
+
+

for example:

+
$ docker logs nodered
+
+

You can also follow a container's log as new entries are added by using the -f flag:

+
$ docker logs -f nodered
+
+

Terminate with a Control+C. Note that restarting a container will also terminate a followed log.

+

restarting a container

+

You can restart a container in several ways:

+
$ cd ~/IOTstack
+$ docker-compose restart «container»
+
+

This kind of restart is the least-powerful form of restart. A good way to think of it is "the container is only restarted, it is not rebuilt".

+

If you change a docker-compose.yml setting for a container and/or an environment variable file referenced by docker-compose.yml then a restart is usually not enough to bring the change into effect. You need to make docker-compose notice the change:

+
$ cd ~/IOTstack
+$ docker-compose up -d «container»
+
+

This type of "restart" rebuilds the container.

+

Alternatively, to force a container to rebuild (without changing either docker-compose.yml or an environment variable file):

+
$ cd ~/IOTstack
+$ docker-compose up -d --force-recreate «container»
+
+

See also updating images built from Dockerfiles if you need to force docker-compose to notice a change to a Dockerfile.

+

persistent data

+

Docker allows a container's designer to map folders inside a container to a folder on your disk (SD, SSD, HD). This is done with the "volumes" key in docker-compose.yml. Consider the following snippet for Node-RED:

+
volumes:
+  - ./volumes/nodered/data:/data
+
+

You read this as two paths, separated by a colon. The:

+
    +
  • external path is ./volumes/nodered/data
  • +
  • internal path is /data
  • +
+

In this context, the leading "." means "the folder containing docker-compose.yml", so the external path is actually:

+
    +
  • ~/IOTstack/volumes/nodered/data
  • +
+

This type of volume is a bind-mount, where the container's internal path is directly linked to the external path. All file-system operations, reads and writes, are mapped directly to the files and folders at the external path.

+
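
You can demonstrate the link for yourself. Assuming the Node-RED container is running with the mapping shown above, a file created at the internal path appears immediately at the external path:

$ docker exec nodered sh -c 'echo hello > /data/hello.txt'
$ cat ~/IOTstack/volumes/nodered/data/hello.txt
hello
$ docker exec nodered rm /data/hello.txt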

deleting persistent data

+

If you need a "clean slate" for a container, you can delete its volumes. Using InfluxDB as an example:

+
$ cd ~/IOTstack
+$ docker-compose rm --force --stop -v influxdb
+$ sudo rm -rf ./volumes/influxdb
+$ docker-compose up -d influxdb
+
+

When docker-compose tries to bring up InfluxDB, it will notice this volume mapping in docker-compose.yml:

+
    volumes:
+      - ./volumes/influxdb/data:/var/lib/influxdb
+
+

and check to see whether ./volumes/influxdb/data is present. Finding it not there, it does the equivalent of:

+
$ sudo mkdir -p ./volumes/influxdb/data
+
+

When InfluxDB starts, it sees that the folder on right-hand-side of the volumes mapping (/var/lib/influxdb) is empty and initialises new databases.

+

This is how most containers behave. There are exceptions so it's always a good idea to keep a backup.

+

stack maintenance

+
+

Breaking update

+

Recent changes will require manual steps or you may get an error like:
+ERROR: Service "influxdb" uses an undefined network "iotstack_nw"

+
+

update Raspberry Pi OS

+

You should keep your Raspberry Pi up-to-date. Despite the word "container" suggesting that containers are fully self-contained, they sometimes depend on operating system components ("WireGuard" is an example).

+
$ sudo apt update
+$ sudo apt upgrade -y
+
+

git pull

+

Although the menu will generally do this for you, it does not hurt to keep your local copy of the IOTstack repository in sync with the master version on GitHub.

+
$ cd ~/IOTstack
+$ git pull
+
+

container image updates

+

There are two kinds of images used in IOTstack:

+
    +
  • Those not built using Dockerfiles (the majority)
  • +
  • +

    Those built using Dockerfiles (special cases)

    +
    +

    A Dockerfile is a set of instructions designed to customise an image before it is instantiated to become a running container.

    +
    +
  • +
+

The easiest way to work out which type of image you are looking at is to inspect the container's service definition in your docker-compose.yml file. If the service definition contains the:

+
    +
  • image: keyword then the image is not built using a Dockerfile.
  • +
  • build: keyword then the image is built using a Dockerfile.
  • +
+
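
For example, the two patterns look like this in a Compose file (illustrative fragments; your own file may differ in the details):

  grafana:
    container_name: grafana
    image: grafana/grafana:latest

  nodered:
    container_name: nodered
    build: ./services/nodered/.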

updating images not built from Dockerfiles

+

If new versions of this type of image become available on DockerHub, your local IOTstack copies can be updated by a pull command:

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune
+
+

The pull downloads any new images. It does this without disrupting the running stack.

+

The up -d notices any newly-downloaded images, builds new containers, and swaps old-for-new. There is barely any downtime for affected containers.

+

updating images built from Dockerfiles

+

Containers built using Dockerfiles have a two-step process:

+
    +
  1. A base image is downloaded from DockerHub; and then
  2. +
  3. The Dockerfile "runs" to build a local image.
  4. +
+

Node-RED is a good example of a container built from a Dockerfile. The Dockerfile defines some (or possibly all) of your add-on nodes, such as those needed for InfluxDB or Tasmota.

+

There are two separate update situations that you need to consider:

+
    +
  • If your Dockerfile changes; or
  • +
  • If a newer base image appears on DockerHub
  • +
+

Node-RED also provides a good example of why your Dockerfile might change: if you decide to add or remove add-on nodes.

+

Note:

+
    +
  • You can also add nodes to Node-RED using Manage Palette.
  • +
+
when Dockerfile changes (local image only)
+

When your Dockerfile changes, you need to rebuild like this:

+
$ cd ~/IOTstack
+$ docker-compose up --build -d «container»
+$ docker system prune
+
+

This only rebuilds the local image and, even then, only if docker-compose senses a material change to the Dockerfile.

+

If you are trying to force the inclusion of a later version of an add-on node, you need to treat it like a DockerHub update.

+

Key point:

+
    +
  • The base image is not affected by this type of update.
  • +
+

Note:

+
    +
  • +

    You can also use this type of build if you get an error after modifying Node-RED's environment:

    +
    $ cd ~/IOTstack
    +$ docker-compose up --build -d nodered
    +
    +
  • +
+
when DockerHub updates (base and local images)
+

When a newer version of the base image appears on DockerHub, you need to rebuild like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull «container»
+$ docker-compose up -d «container»
+$ docker system prune
+$ docker system prune
+
+

This causes DockerHub to be checked for the later version of the base image, downloading it as needed.

+

Then, the Dockerfile is run to produce a new local image. The Dockerfile run happens even if a new base image was not downloaded in the previous step.

+

deleting unused images

+

As your system evolves and new images come down from DockerHub, you may find that more disk space is being occupied than you expected. Try running:

+
$ docker system prune
+
+

This recovers anything no longer in use. Sometimes multiple prune commands are needed (eg the first removes an old local image, the second removes the old base image).

+

If you add a container via menu.sh and later remove it (either manually or via menu.sh), the associated image(s) will probably persist. You can check which images are installed via:

+
$ docker images
+
+REPOSITORY               TAG                 IMAGE ID            CREATED             SIZE
+influxdb                 latest              1361b14bf545        5 days ago          264MB
+grafana/grafana          latest              b9dfd6bb8484        13 days ago         149MB
+iotstack_nodered         latest              21d5a6b7b57b        2 weeks ago         540MB
+portainer/portainer-ce   latest              5526251cc61f        5 weeks ago         163MB
+eclipse-mosquitto        latest              4af162db6b4c        6 weeks ago         8.65MB
+nodered/node-red         latest              fa3bc6f20464        2 months ago        376MB
+portainer/portainer      latest              dbf28ba50432        2 months ago        62.5MB
+
+

Both "Portainer CE" and "Portainer" are in that list. Assuming "Portainer" is no longer in use, it can be removed by using either its repository name or its Image ID. In other words, the following two commands are synonyms:

+
$ docker rmi portainer/portainer
+$ docker rmi dbf28ba50432
+
+

In general, you can use the repository name to remove an image but the Image ID is sometimes needed. The most common situation where you are likely to need the Image ID is after an image has been updated on DockerHub and pulled down to your Raspberry Pi. You will find two images with the same name. One will be tagged "latest" (the running version) while the other will be tagged "<none>" (the prior version). You use the Image ID to resolve the ambiguity.

+
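
In that situation, the commands follow this pattern (substitute the actual Image ID that docker images reports for the «none»-tagged row):

$ docker images nodered/node-red
$ docker rmi «Image ID of the none-tagged row»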

pinning to specific versions

+

See container image updates to understand how to tell the difference between images that are used "as is" from DockerHub versus those that are built from local Dockerfiles.

+

Note:

+
    +
  • You should always visit an image's DockerHub page before pinning to a specific version. This is the only way to be certain that you are choosing the appropriate version suffix.
  • +
+

To pin an image to a specific version:

+
    +
  • +

    If the image comes straight from DockerHub, you apply the pin in docker-compose.yml. For example, to pin Grafana to version 7.5.7, you change:

    +
      grafana:
    +    container_name: grafana
    +    image: grafana/grafana:latest
    +    
    +
    +

    to:

    +
      grafana:
    +    container_name: grafana
    +    image: grafana/grafana:7.5.7
    +    
    +
    +

    To apply the change, "up" the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d grafana
    +
    +
  • +
  • +

    If the image is built using a local Dockerfile, you apply the pin in the Dockerfile. For example, to pin Mosquitto to version 1.6.15, edit ~/IOTstack/.templates/mosquitto/Dockerfile to change:

    +
    # Download base image
    +FROM eclipse-mosquitto:latest
    +…
    +
    +

    to:

    +
    # Download base image
    +FROM eclipse-mosquitto:1.6.15
    +…
    +
    +

    To apply the change, "up" the container and pass the --build flag:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d --build mosquitto
    +
    +
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/BuildStack-RandomPassword/index.html b/BuildStack-RandomPassword/index.html new file mode 100644 index 000000000..68b496d0d --- /dev/null +++ b/BuildStack-RandomPassword/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/BuildStack-Services/index.html b/BuildStack-Services/index.html new file mode 100644 index 000000000..26e4ea22c --- /dev/null +++ b/BuildStack-Services/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Containers/AdGuardHome/index.html b/Containers/AdGuardHome/index.html new file mode 100644 index 000000000..f253abc4b --- /dev/null +++ b/Containers/AdGuardHome/index.html @@ -0,0 +1,2353 @@ + + + + + + + + + + + + + + + + + + + + + + + + + AdGuard Home - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

AdGuard Home

+

References

+ +

Either AdGuard Home or PiHole, but not both

+

AdGuard Home and PiHole perform similar functions. They use the same ports so you can not run both at the same time. You must choose one or the other.

+

Quick Start

+

When you first install AdGuard Home:

+
    +
  1. +

    Use a web browser to connect to it using port 3001. For example:

    +
    http://raspberrypi.local:3001
    +
    +
  2. +
  3. +

    Click "Getting Started".

    +
  4. +
  5. +

    Change the port number for the Admin Web Interface to be "8089". Leave the other settings on the page at their defaults and click "Next".

    +
  6. +
  7. Enter a username and password and click "Next".
  8. +
  9. Click "Open Dashboard". This redirects to port 8089.
  10. +
  11. +

    After the initial setup, you connect to AdGuard Home via port 8089:

    +
    http://raspberrypi.local:8089
    +
    +
  12. +
+

About port 8089

+

Port 8089 is the default administrative user interface for AdGuard Home running under IOTstack.

+

Port 8089 is not active until you have completed the Quick Start procedure. You must start by connecting to port 3001.

+

Because of AdGuard Home limitations, you must take special precautions if you decide to change to a different port number:

+
    +
  1. +

    The internal and external ports must be the same; and

    +
  2. +
  3. +

    You must convince AdGuard Home that it is a first-time installation:

    +
    $ cd ~/IOTstack
    +$ docker-compose stop adguardhome
    +$ docker-compose rm -f adguardhome
    +$ sudo rm -rf ./volumes/adguardhome
    +$ docker-compose up -d adguardhome
    +
    +
  4. +
  5. +

    Repeat the Quick Start procedure, this time substituting the new Admin Web Interface port where you see "8089".

    +
  6. +
+

About port 3001:3000

+

Port 3001 (external, 3000 internal) is only used during the Quick Start procedure. Once port 8089 becomes active, port 3001 ceases to be active.

+

In other words, you need to keep port 3001 reserved even though it is only ever used to set up port 8089.

+

About Host Mode

+

If you want to run AdGuard Home as your DHCP server, you need to put the container into "host mode". You need to edit the AdGuard Home service definition in docker-compose.yml to do the following (a sketch of the result appears after this list):

+
    +
  1. +

    add the line:

    +
    network_mode: host
    +
    +
  2. +
  3. +

    remove the ports: directive and all of the port mappings.

    +
  4. +
+
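
After those edits, the AdGuard Home service definition will look something like this sketch (illustrative only; keep the volumes and any other settings your existing definition already has, because only network_mode is added and the ports are removed):

  adguardhome:
    container_name: adguardhome
    image: adguard/adguardhome
    restart: unless-stopped
    network_mode: host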

Note:

+
    +
  • It is not really a good idea to offer DHCP services from a container. This is because containers generally start far too late in a boot process to be useful. If you want to use AdGuard Home for DHCP, you should probably consider a native installation.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Adminer/index.html b/Containers/Adminer/index.html new file mode 100644 index 000000000..453539afe --- /dev/null +++ b/Containers/Adminer/index.html @@ -0,0 +1,2222 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Adminer - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Adminer

+

References

+ +

About

+

This is a nice tool for managing databases. The web interface has moved to port 9080. There was an issue where openHAB and Adminer were using the same ports. If you have a port conflict, edit docker-compose.yml and, under the adminer service, change the line to read:

    ports:
+      - 9080:8080
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Blynk_server/index.html b/Containers/Blynk_server/index.html new file mode 100644 index 000000000..030dafbe8 --- /dev/null +++ b/Containers/Blynk_server/index.html @@ -0,0 +1,2747 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Blynk server - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Blynk server

+

This document discusses an IOTstack-specific version of Blynk-Server. It is built on top of an Ubuntu base image using a Dockerfile.

+

References

+ +

Acknowledgement:

+
    +
  • Original writeup from @877dev
  • +
+

Significant directories and files

+
~/IOTstack
+├── .templates
+│   └── blynk_server
+│       ├── Dockerfile ❶
+│       ├── docker-entrypoint.sh ❷
+│       ├── iotstack_defaults ❸
+│       │   ├── mail.properties
+│       │   └── server.properties
+│       └── service.yml ❹
+├── services
+│   └── blynk_server
+│       └── service.yml ❺
+├── docker-compose.yml ❻
+└── volumes
+    └── blynk_server ❼
+        ├── config ❽
+        │   ├── mail.properties
+        │   └── server.properties
+        └── data
+
+
    +
  1. The Dockerfile used to construct Blynk Server on top of Ubuntu.
  2. +
  3. A start-up script designed to handle container self-repair.
  4. +
  5. A folder holding the default versions of the configuration files.
  6. +
  7. The template service definition.
  8. +
  9. The working service definition (only relevant to old-menu, copied from ❹).
  10. +
  11. The Compose file (includes ❹).
  12. +
  13. The persistent storage area for the blynk_server container.
  14. +
  15. Working copies of the configuration files (copied from ❸).
  16. +
+

Everything in ❽:

+
    +
  • will be replaced if it is not present when the container starts; but
  • +
  • will never be overwritten if altered by you.
  • +
+

How Blynk Server gets built for IOTstack

+

GitHub Updates

+

Periodically, the source code is updated and a new version is released. You can check for the latest version at the releases page.

+

IOTstack menu

+

When you select Blynk Server in the IOTstack menu, the template service definition is copied into the Compose file.

+
+

Under old menu, it is also copied to the working service definition and then not really used.

+
+

IOTstack first run

+

On a first install of IOTstack, you run the menu, choose your containers, and are told to do this:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

docker-compose reads the Compose file. When it arrives at the blynk_server fragment, it finds:

+
  blynk_server:
+    build:
+      context: ./.templates/blynk_server/.
+      args:
+        - BLYNK_SERVER_VERSION=0.41.16
+
+

The build statement tells docker-compose to look for:

+
~/IOTstack/.templates/blynk_server/Dockerfile
+
+

The BLYNK_SERVER_VERSION argument is passed into the build process. This implicitly pins each build to the version number in the Compose file (eg 0.41.16). If you need to update to a later version, see Upgrading Blynk Server below.

+
+

The Dockerfile is in the .templates directory because it is intended to be a common build for all IOTstack users. This is different to the arrangement for Node-RED where the Dockerfile is in the services directory because it is how each individual IOTstack user's version of Node-RED is customised.

+
+

The Dockerfile begins with:

+
FROM ubuntu
+
+

The FROM statement tells the build process to pull down the base image from DockerHub.

+
+

It is a base image in the sense that it never actually runs as a container on your Raspberry Pi.

+
+

The remaining instructions in the Dockerfile customise the base image to produce a local image. The customisations are:

+
    +
  1. Add packages to satisfy dependencies.
  2. +
  3. Add the default versions of the configuration files so that the container can perform self-repair each time it is launched.
  4. +
  5. Download and install the Java package that implements the Blynk Server.
  6. +
+

The local image is instantiated to become your running container.

+

When you run the docker images command after Blynk Server has been built, you may see two rows that are relevant:

+
$ docker images
+REPOSITORY              TAG      IMAGE ID       CREATED         SIZE
+iotstack_blynk_server   latest   3cd6445f8a7e   3 hours ago     652MB
+ubuntu                  latest   897590a6c564   7 days ago      49.8MB
+
+
    +
  • ubuntu is the base image; and
  • +
  • iotstack_blynk_server is the local image.
  • +
+

You may see the same pattern in Portainer, which reports the base image as "unused". You should not remove the base image, even though it appears to be unused.

+
+

Whether you see one or two rows depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
+

Logging

+

You can inspect Blynk Server's log by:

+
$ docker logs blynk_server
+
+

Changing Blynk Server's configuration

+

The first time you launch the blynk_server container, the following structure will be created in the persistent storage area:

+
~/IOTstack/volumes/blynk_server
+├── [drwxr-xr-x pi      ]  config
+│   ├── [-rw-r--r-- pi      ]  mail.properties
+│   └── [-rw-r--r-- pi      ]  server.properties
+└── [drwxr-xr-x root    ]  data
+
+

The two .properties files can be used to alter Blynk Server's configuration. When you make changes to these files, you activate them by restarting the container:

+
$ cd ~/IOTstack
+$ docker-compose restart blynk_server
+
+

Getting a clean slate

+

Erasing Blynk Server's persistent storage area triggers self-healing and restores known defaults:

+

$ cd ~/IOTstack
+$ docker-compose down blynk_server
+$ sudo rm -rf ./volumes/blynk_server
+$ docker-compose up -d blynk_server
+
+Notes:

+
    +
  • +

    You can also remove individual configuration files and then trigger self-healing. For example, if you decide to edit server.properties and make a mess, you can restore the original default version like this:

    +
    $ cd ~/IOTstack
    +$ rm volumes/blynk_server/config/server.properties
    +$ docker-compose restart blynk_server
    +
    +
  • +
  • +

    See also if downing a container doesn't work

    +
  • +
+

Upgrading Blynk Server

+

To find out when a new version has been released, you need to visit the Blynk-Server releases page at GitHub.

+

At the time of writing, version 0.41.16 was the most up-to-date. Suppose that version 0.41.17 has been released and that you decide to upgrade:

+
    +
  1. +

    Edit your Compose file to change the version number:

    +
      blynk_server:
    +    build:
    +      context: ./.templates/blynk_server/.
    +      args:
    +        - BLYNK_SERVER_VERSION=0.41.17
    +
    +

    Note:

    +
      +
    • You can use this method to pin Blynk Server to any available version.
    • +
    +
  2. +
  3. +

    You then have two options:

    +
      +
    • +

      If you only want to reconstruct the local image:

      +
      $ cd ~/IOTstack
      +$ docker-compose up --build -d blynk_server
      +$ docker system prune -f
      +
      +
    • +
    • +

      If you want to update the Ubuntu base image at the same time:

      +
      $ cd ~/IOTstack
      +$ docker-compose build --no-cache --pull blynk_server
      +$ docker-compose up -d blynk_server
      +$ docker system prune -f
      +$ docker system prune -f
      +
      +

      The second prune will only be needed if there is an old base image and that, in turn, depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

      +
    • +
    +
  4. +
+

Using Blynk Server

+

See the References for documentation links.

+

Connecting to the administrative UI

+

To connect to the administrative interface, navigate to:

+
https://<your pis IP>:9444/admin
+
+

You may encounter browser security warnings which you will have to acknowledge in order to be able to connect to the page. The default credentials are:

+
    +
  • username = admin@blynk.cc
  • +
  • password = admin
  • +
+

Change username and password

+
    +
  1. Click on Users > "email address" and edit email, name and password.
  2. +
  3. Save changes.
  4. +
  5. +

    Restart the container using either Portainer or the command line:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart blynk_server
    +
    +
  6. +
+

Setup gmail

+

Optional step, useful for getting the auth token emailed to you. +(To be added once confirmed working....)

+

iOS/Android app setup

+
    +
  1. When setting up the application on your mobile, be sure to select the "custom" setup option.
  2. +
  3. Press "New Project"
  4. +
  5. Give it a name, choose device "Raspberry Pi 3 B" so you have plenty of virtual pins available, and lastly select WiFi.
  6. +
  7. Create project and the auth token will be emailed to you (if emails configured). You can also find the token in app under the phone app settings, or in the admin web interface by clicking Users>"email address" and scroll down to token.
  8. +
+

Quick usage guide for app

+
    +
  1. Press on the empty page, the widgets will appear from the right.
  2. +
  3. Select your widget, let's say a button.
  4. +
  5. It appears on the page, press on it to configure.
  6. +
  7. Give it a name and colour if you want.
  8. +
  9. Press on PIN, and select virtual. Choose any pin i.e. V0
  10. +
  11. Press ok.
  12. +
  13. To start the project running, press top right Play button.
  14. +
  15. You will get an offline message, because no devices are connected to your project via the token.
  16. +
+

Enter Node-Red.....

+

Node-RED

+
    +
  1. Install node-red-contrib-blynk-ws from Manage Palette.
  2. +
  3. Drag a "write event" node into your flow, and connect to a debug node
  4. +
  5. +

    Configure the Blynk node for the first time:

    +
    URL: wss://youripaddress:9444/websockets
    +
    +

    There is more information here.

  4. Enter your auth token from before and save/exit.
  5. When you deploy the flow, notice the app shows a connected message, as does the Blynk node.
  6. Press the button on the app; you will notice the payload is sent to the debug node.

    +
  6. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Chronograf/index.html b/Containers/Chronograf/index.html new file mode 100644 index 000000000..489c13d09 --- /dev/null +++ b/Containers/Chronograf/index.html @@ -0,0 +1,2316 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Chronograf - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Chronograf

+

References

+ +

Kapacitor integration

+

If you selected Kapacitor in the menu and want Chronograf to be able to interact with it, you need to edit docker-compose.yml to un-comment the lines which are commented-out in the following:

+
chronograf:
+  
+  environment:
+  
+  # - KAPACITOR_URL=http://kapacitor:9092
+  depends_on:
+  
+  # - kapacitor
+
+

If the Chronograf container is already running when you make this change, run:

+
$ cd ~/IOTstack
+$ docker-compose up -d chronograf
+
+

Upgrading Chronograf

+

You can update the container via:

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune
+
+

In words:

+
    +
  • docker-compose pull downloads any newer images;
  • +
  • docker-compose up -d causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and
  • +
  • the prune gets rid of the outdated images.
  • +
+

Chronograf version pinning

+

If you need to pin to a particular version:

+
    +
  1. Use your favourite text editor to open docker-compose.yml.
  2. +
  3. +

    Find the line:

    +
    image: chronograf:latest
    +
    +
  4. +
  5. +

    Replace latest with the version you wish to pin to. For example, to pin to version 1.9.0:

    +
    image: chronograf:1.9.0
    +
    +
  6. +
  7. +

    Save the file and tell docker-compose to bring up the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d chronograf
    +$ docker system prune
    +
    +
  8. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/DashMachine/index.html b/Containers/DashMachine/index.html new file mode 100644 index 000000000..6fe7940ff --- /dev/null +++ b/Containers/DashMachine/index.html @@ -0,0 +1,2243 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DashMachine - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DashMachine

+

References

+ +

Web Interface

+

The web UI can be found on "your_ip":5000.

+

The default credentials are:

  • User: admin
  • Password: admin

+

About DashMachine

+

DashMachine is a web application bookmark dashboard. It allows you to have all your application bookmarks available in one place, grouped and organized how you want to see them.

+

Within the context of IOTstack, DashMachine can help you organize your deployed services.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Deconz/index.html b/Containers/Deconz/index.html new file mode 100644 index 000000000..7c0277c5d --- /dev/null +++ b/Containers/Deconz/index.html @@ -0,0 +1,2388 @@ + + + + + + + + + + + + + + + + + + + + + + + + + deCONZ - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

deCONZ

+

References

+ +

Setup

+

Old menu (old menu branch)

+

If you use "old menu", you may get an error message similar to the following on first launch:

+
parsing ~/IOTstack/docker-compose.yml: error while interpolating services.deconz.devices.[]: required variable DECONZ_DEVICE_PATH is missing a value: eg echo DECONZ_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env
+
+

The message is telling you that you need to define the path to your deCONZ device. Common examples are:

+
    +
  • Raspbee at /dev/serial0
  • +
  • Conbee at /dev/ttyUSB0
  • +
  • Conbee II at /dev/ttyACM0
  • +
+
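If you are not sure which of those paths applies, plug the gateway in and list the usual candidates; any path that does not exist simply reports "No such file or directory":
$ ls -l /dev/serial0 /dev/ttyUSB0 /dev/ttyACM0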

Once you have identified the appropriate device path, you can define it like this:

+
$ echo DECONZ_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env
+
+

This example uses /dev/serial0. Substitute your actual device path if it is different.

+

New menu (master branch)

+

New menu offers a sub-menu (place the cursor on deconz and press the right arrow) where you can select the appropriate device path.

+

Dialout group

+

Before running docker-compose up -d, make sure your Linux user is part of the dialout group, which allows the user access to serial devices (i.e. Conbee/Conbee II/RaspBee). If you are not certain, simply add your user to the dialout group by running the following command (username "pi" being used as an example):

+
$ sudo usermod -a -G dialout pi
+
+
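You can confirm the group membership afterwards (you may need to log out and back in before the change shows up):
$ groups pi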

Troubleshooting

+

Your Conbee/Conbee II/RaspBee gateway must be plugged in when the deCONZ Docker container is being brought up. If your gateway is not detected, or no lights can be paired, try moving the device to another usb port. A reboot may help too.

+

Use a 0.5-1m usb extension cable with ConBee (II) to avoid wifi and bluetooth noise/interference from your Raspberry Pi (recommended by the manufacturer and often the solution to poor performance).

+

Accessing the Phoscon UI

+

The Phoscon UI is available using port 8090 (http://your.local.ip.address:8090/)

+

Viewing the deCONZ Zigbee mesh

+

The Zigbee mesh can be viewed using VNC on port 5901. The default VNC password is "changeme".

+

Connecting deCONZ and Node-RED

+

Install node-red-contrib-deconz via the "Manage palette" menu in Node-RED (if not already installed) and follow these 2 simple steps (also shown in the video below):

+

Step 1: In the Phoscon UI, Go to Settings > Gateway > Advanced and click "Authenticate app".

+

Step 2: In Node-RED, open a deCONZ node, select "Add new deconz-server", insert your IP address and port 8090 and click "Get settings". Click "Add", "Done" and "Deploy". Your device list will not be updated until you deploy.

+

Installing deCONZ

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/DiyHue/index.html b/Containers/DiyHue/index.html new file mode 100644 index 000000000..bbf996608 --- /dev/null +++ b/Containers/DiyHue/index.html @@ -0,0 +1,2242 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DIY hue - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DIY hue

+ +

About

+

diyHue is a utility to control the lights in your home.

+

Setup

+

Before you start diyHue you will need to get your IP and MAC addresses. Run ip addr in the terminal

+

image

+
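If you would rather not scan the full ip addr output, a filter such as the following shows just the MAC (link/ether) and IP (inet) lines. It assumes your active interface is eth0; use wlan0 if you are on Wi-Fi:
$ ip addr show eth0 | grep -E 'link/ether|inet '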

Enter these values into the ./services/diyhue/diyhue.env file

+

The default username and password are Hue and Hue respectively.

+

Usage

+

The web interface is available on port 8070

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Domoticz/index.html b/Containers/Domoticz/index.html new file mode 100644 index 000000000..660acd20f --- /dev/null +++ b/Containers/Domoticz/index.html @@ -0,0 +1,2357 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Domoticz - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Domoticz

+

References

+ +

Invitation

+

There is no IOTstack documentation for Domoticz.

+

This is a standing invitation to anyone who is familiar with this container to submit a Pull Request to provide some documentation.

+

Environment Variables

+
    +
  • +

    TZ=${TZ:-Etc/UTC}

    +

    If TZ is defined in ~/IOTstack/.env then the value there is applied, otherwise the default of Etc/UTC is used. You can initialise .env like this:

    +
    $ cd ~/IOTstack
    +$ [ $(grep -c "^TZ=" .env) -eq 0 ] && echo "TZ=$(cat /etc/timezone)" >>.env
    +
    +
  • +
  • +

    LOG_PATH=/opt/domoticz/userdata/domoticz.log

    +

    This is disabled by default. If you enable it, Domoticz will write a log to that internal path. The path corresponds with the external path:

    +
    ~/IOTstack/volumes/domoticz/domoticz.log
    +
    +

    Note that this log is persistent. In other words, it will survive container restarts. This means you are responsible for pruning it from time to time. The Unix tradition for pruning logs is:

    +
    $ cd ~/IOTstack/volumes/domoticz/
    +$ cat /dev/null | sudo tee domoticz.log
    +
    +

    If, instead, you decide to delete the log file, you should stop the container first:

    +
    $ cd ~/IOTstack
    +$ docker-compose down domoticz
    +$ sudo rm ./volumes/domoticz/domoticz.log
    +$ docker-compose up -d domoticz
    +
    +
  • +
  • +

    EXTRA_CMD_ARG=

    +

    This is disabled by default. It can be enabled and used to override the default parameters and pass command-line parameters of your choosing to Domoticz.

    +
  • +
+

Devices

+

The service definition includes an x-devices: clause. The x- prefix has the same effect as commenting-out the entire clause. If you wish to map an external device into the container:

+
    +
  1. Adjust the left-hand-side of the example path to point to the device as it appears on your Raspberry Pi;
  2. +
  3. Remove the x- prefix.
  4. +
  5. +

    Recreate the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d domoticz
    +
    +
  6. +
+
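Putting those steps together, an enabled clause might look like the following sketch. The /dev/ttyUSB0 path is purely illustrative; substitute the device path that applies to your hardware on both sides of the mapping as appropriate:
devices:
  - "/dev/ttyUSB0:/dev/ttyUSB0"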

Migration Notes

+
    +
  1. Older IOTstack service definitions for Domoticz used the lscr.io/linuxserver/domoticz:latest image. The current service definition uses the domoticz/domoticz:stable image.
  2. +
  3. +

    The location of the persistent store has changed, as has its relationship to the internal path:

    + + + + + + + + + + + + + + + + + + + + +
    service definitionpersistent storeinternal path
    older~/IOTstack/volumes/domoticz/dataconfig
    current~/IOTstack/volumes/domoticz/opt/domoticz/userdata
    +

    If you have been using the older service definition and wish to upgrade to the current service definition, you can try migrating like this:

    +
    $ cd ~/IOTstack/volumes
    +$ sudo mv domoticz domoticz.old
    +$ sudo cp -a domoticz.old/data domoticz
    +
    +
  4. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Dozzle/index.html b/Containers/Dozzle/index.html new file mode 100644 index 000000000..f6778b3dd --- /dev/null +++ b/Containers/Dozzle/index.html @@ -0,0 +1,2239 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Dozzle - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Dozzle

+

Reference

+ +

Web Interface

+

The web interface is available at "your_ip":8889

+

About Dozzle

+

Dozzle is a small lightweight application with a web based interface to monitor Docker logs. +It doesn’t store any log files. It is for live monitoring of your container logs only.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Duckdns/index.html b/Containers/Duckdns/index.html new file mode 100644 index 000000000..4ff38caf9 --- /dev/null +++ b/Containers/Duckdns/index.html @@ -0,0 +1,2305 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Duck DNS - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Duck DNS

+

Duckdns is a free public DNS service that provides you with a domain name you +can update to match your dynamic IP-address.

+

This container automates the process to keep the duckdns.org domain updated +when your IP-address changes.

+

Configuration

+

First, register an account, add your subdomain and get your token from +http://www.duckdns.org/

+

Either edit ~/IOTstack/docker-compose.yml or create a file +~/IOTstack/docker-compose.override.yml. Place your Duckdns token and +subdomain name (without .duckdns.org) there:

+
docker-compose.override.yml
version: '3.6'
+services:
+  duckdns:
+    environment:
+      TOKEN: your-duckdns-token
+      SUBDOMAINS: subdomain
+
+

Observe that at least the initial update is successful:

+
$ cd ~/IOTstack
+$ docker-compose up -d duckdns
+$ docker-compose logs -f duckdns
+...SNIP...
+duckdns    | Sat May 21 11:01:00 UTC 2022: Your IP was updated
+...SNIP...
+(ctrl-c to stop following the log)
+
+
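You can also confirm that the public DNS record now resolves to your current public IP (substitute your own subdomain):
$ nslookup subdomain.duckdns.org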

If there is a problem, check that the resulting effective configuration of +'duckdns:' looks OK: +

$ cd ~/IOTstack && docker-compose config
+

+

Domain name for the private IP

+
+

Example public/private IP:s and domains

+
flowchart
+I([Internet])
+G("Router\npublic IP: 52.85.51.71\nsubdomain.duckdns.org")
+R(Raspberry pi\nprivate IP: 192.168.0.100\nprivate_subdomain.duckdns.org)
+I --- |ISP| G --- |LAN| R
+
+

As a public DNS server, Duckdns is not meant to be used for private IPs. It's +recommended that for resolving internal LAN IPs you use the Pi +Hole container or run a dedicated DNS server.

+

That said, it's possible to update a Duckdns subdomain to your private LAN IP. +This may be convenient if you have devices that don't support mDNS (.local) or +don't want to run Pi-hole. This is especially useful if you can't assign a +static IP to your RPi. No changes to your DNS resolver settings are needed.

+

First, as for the public subdomain, add the domain name to your Duckdns account +by logging in from their homepage. Then add a PRIVATE_SUBDOMAINS variable +indicating this subdomain:

+
version: '3.6'
+services:
+  duckdns:
+    environment:
+      TOKEN: ...
+      SUBDOMAINS: ...
+      PRIVATE_SUBDOMAINS: private_subdomain
+
+

References

+ + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/EspruinoHub/index.html b/Containers/EspruinoHub/index.html new file mode 100644 index 000000000..3eeb0de14 --- /dev/null +++ b/Containers/EspruinoHub/index.html @@ -0,0 +1,2144 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Espruinohub - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Espruinohub

+

This is a testing container

+

I tried it, however the container keeps restarting. Running docker logs espruinohub shows "BLE Broken?", but that could just be because I don't have any BLE devices nearby.

+

The web interface is on "{your_Pis_IP}:1888"

+

See EspruinoHub#status--websocket-mqtt--espruino-web-ide for other details.

+

There were no recommendations for persistent data volumes, so docker-compose down may destroy all your configuration; use docker-compose stop instead.

+
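For example, to stop the container without removing it (assuming the service is named espruinohub, matching the container name above):
$ cd ~/IOTstack
$ docker-compose stop espruinohub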

Please check existing issues if you encounter a problem, and then open a new issue if your problem has not been reported.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Grafana/index.html b/Containers/Grafana/index.html new file mode 100644 index 000000000..9703ad52e --- /dev/null +++ b/Containers/Grafana/index.html @@ -0,0 +1,2459 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Grafana - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Grafana

+

References

+ +

Adding InfluxDB datasource

+

When you have logged into Grafana (default user/pass: admin/admin), you have +to add a data source to be used for the graphs.

+

Select Data Sources -> Add data source -> InfluxDB.

+

Set options:

+
    +
  • HTTP / URL: http://influxdb:8086
  • +
  • InfluxDB Details / Database: telegraf
  • +
  • InfluxDB Details / User: nodered
  • +
  • InfluxDB Details / Password: nodered
  • +
+

Overriding configuration variables

+

Grafana documentation contains a list of +settings. +Settings are described in terms of how they appear in ".ini" files.

+

Grafana configuration is usually done in grafana.ini, but when used via +docker as the IOTstack does, it should be configured using environment +variables.

+

Edit docker-compose.yml and find grafana: and under it +environment: this is where you can place the ini-options, but formatted as: +

    - GF_<SectionName>_<KeyName>=<value>
+
+If you are using old-menu edit ~/IOTstack/services/grafana/grafana.env +instead and add the lines directly there, but without the leading dash: +GF_<SectionName>_<KeyName>=<value>

+
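As an illustration, the grafana.ini setting root_url in the [server] section becomes the following environment variable (the URL shown is only an example; substitute your own):
    - GF_SERVER_ROOT_URL=https://grafana.example.com/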

For any changes to take effect you need to recreate the Grafana container:

+
$ docker-compose up -d grafana
+
+

Setting your time-zone

+

Change the right hand side to your own +timezone:

+
    - TZ=Etc/UTC
+
+

Anonymous login

+

To allow anonymous logins add:

+
    - GF_AUTH_ANONYMOUS_ENABLED=true
+
+ +

If you do not change anything then, when you bring up the stack and use a browser to connect to your Raspberry Pi on port 3000, Grafana will:

+
    +
  • Expect you to login as user "admin" with password "admin"; and then
  • +
  • Force you to change the default password to something else.
  • +
+

Thereafter, you will login as "admin" with whatever password you chose. You can change the administrator's password as often as you like via the web UI (profile button, change password tab).

+

This default operation can be changed by configuration options. They will have +any effect only if Grafana has just been added to the stack, but has never +been launched. Thus, if the folder ~/IOTstack/volumes/grafana exists, Grafana +has already been started, and adding and changing these options will not +have any effect.

+

To customize, edit the file as described above and add the following lines under the environment: clause. For example, to set the administrative username to be "maestro" with password "123456":

+
    - GF_SECURITY_ADMIN_USER=maestro
+    - GF_SECURITY_ADMIN_PASSWORD=123456
+
+

If you change the default password, Grafana will not force you to change the +password on first login but you will still be able to change it via the web UI.

+

As a summary, the environment variables only take effect if you set them up before Grafana is launched for the first time:

+
    +
  • GF_SECURITY_ADMIN_USER has a default value of "admin". You can explicitly set it to "admin" or some other value. Whatever option you choose then that's the account name of Grafana's administrative user. But choosing any value other than "admin" is probably a bad idea.
  • +
  • GF_SECURITY_ADMIN_PASSWORD has a default value of "admin". You can explicitly set it to "admin" or some other value. If its value is "admin" then you will be forced to change it the first time you login to Grafana. If its value is something other than "admin" then that will be the password until you change it via the web UI.
  • +
+

Options with spaces

+

To set an option with a space, you must enclose the whole value in quotes:

+
    - "GF_AUTH_ANONYMOUS_ORG_NAME=Main Org."
+
+

HELP – I forgot my Grafana admin password!

+

Assuming Grafana is started, run:

+
$ docker exec grafana grafana cli admin reset-admin-password «NEWPASSWORD»
+
+

where «NEWPASSWORD» is the value of your choice.

+

Note:

+
    +
  • If you have customized GF_SECURITY_ADMIN_USER to be something other than "admin", the password change will be applied to that username. In other words, in the docker exec command above, the two references to "admin" are referring to the administrator's account, not the username of the administrator's account. Run the command "as is". Do not replace "admin" with the username of the administrator's account.
  • +
+

HELP - Resetting to a clean slate

+

"I made a bit of a mess with Grafana. First time user. Steep learning curve. False starts, many. Mistakes, unavoidable. Been there, done that. But now I really need to start from a clean slate. And, yes, I understand there is no undo for this."

+

Begin by stopping Grafana:

+
$ cd ~/IOTstack
+$ docker-compose down grafana
+
+
+

see also if downing a container doesn't work

+
+

You have two options:

+
    +
  1. +

    Destroy your settings and dashboards but retain any plugins you may have installed:

    +
    $ sudo rm ~/IOTstack/volumes/grafana/data/grafana.db
    +
    +
  2. +
  3. +

    Nuke everything (triple-check this command before you hit return):

    +
    $ sudo rm -rf ~/IOTstack/volumes/grafana/data
    +
    +
  4. +
+

This is where you should edit docker-compose.yml or +~/IOTstack/services/grafana/grafana.env to correct any problems (such as +choosing an administrative username other than "admin").

+

When you are ready, bring Grafana back up again:

+
$ cd ~/IOTstack
+$ docker-compose up -d grafana
+
+

Grafana will automatically recreate everything it needs. You will be able to login as "admin/admin" (or the credentials you set using GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD).

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Heimdall/index.html b/Containers/Heimdall/index.html new file mode 100644 index 000000000..6e28469d7 --- /dev/null +++ b/Containers/Heimdall/index.html @@ -0,0 +1,2247 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Heimdall - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Heimdall

+

References

+ +

Web Interface

+

The web UI can be found on:

+
    +
  • HTTP: "your_ip":8882
  • +
  • HTTPS: "your_ip":8883
  • +
+

About Heimdall

+

From the Heimdall website:

+
+

Heimdall Application Dashboard is a dashboard for all your web applications. It doesn't need to be limited to applications though, you can add links to anything you like. There are no iframes here, no apps within apps, no abstraction of APIs. if you think something should work a certain way, it probably does.

+
+

Within the context of IOTstack, the Heimdall Application Dashboard can help you organize your deployed services.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Home-Assistant/index.html b/Containers/Home-Assistant/index.html new file mode 100644 index 000000000..02a2be72b --- /dev/null +++ b/Containers/Home-Assistant/index.html @@ -0,0 +1,2547 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Home Assistant - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Home Assistant

+

Home Assistant is a home automation platform. It is able to track and control all devices at your home and offer a platform for automating control.

+

References

+ +

Home Assistant: two versions

+

There are two versions of Home Assistant:

+
    +
  • Home Assistant Container; and
  • +
  • Supervised Home Assistant (also known as both "Hass.io" and "Home Assistant Core").
  • +
+

Each version:

+
    +
  • provides a web-based management interface on port 8123; and
  • +
  • runs in "host mode" in order to discover devices on your LAN, including devices communicating via multicast traffic.
  • +
+

Home Assistant Container runs as a single Docker container, and doesn't support all the features that Supervised Home Assistant does (such as add-ons). Supervised Home Assistant runs as a collection of Docker containers under its own orchestration.

+

The only method supported by IOTstack is Home Assistant Container.

+
+

To understand why, see about Supervised Home Assistant.

+
+

If Home Assistant Container will not do what you want then, basically, you will need two Raspberry Pis:

+ +

Installing Home Assistant Container

+

Home Assistant (Container) can be found in the Build Stack menu. Selecting it in this menu results in a service definition being added to:

+
~/IOTstack/docker-compose.yml
+
+

The normal IOTstack commands apply to Home Assistant Container such as:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

Using bluetooth from the container

+

In order to be able to use BT & BLE devices from HA integrations, make sure that Bluetooth is enabled:

+
$ hciconfig
+hci0:   Type: Primary  Bus: UART
+    BD Address: DC:89:FB:A6:32:4B  ACL MTU: 1021:8  SCO MTU: 64:1
+    UP RUNNING 
+    RX bytes:2003 acl:0 sco:0 events:159 errors:0
+    TX bytes:11583 acl:0 sco:0 commands:159 errors:0
+
+

The "UP" in the third line of output indicates that Bluetooth is enabled. If Bluetooth is not enabled, check:

+
$ grep "^AutoEnable" /etc/bluetooth/main.conf
+AutoEnable=true
+
+

If AutoEnable is either missing or not set to true, then:

+
    +
  1. +

    Use sudo and your favourite text editor to open:

    +
    /etc/bluetooth/main.conf
    +
    +
  2. +
  3. +

    Find AutoEnable and make it true.

    +
    +

    If AutoEnable is missing, it needs to be added to the [Policy] section.

    +
    +
  4. +
  5. +

    Reboot your Raspberry Pi.

    +
  6. +
  7. Check that the Bluetooth interface is enabled.
  8. +
+

See also: Scribles: Auto Power On Bluetooth Adapter on Boot-up.

+

Possible service definition changes

+

Although the Home Assistant documentation does not mention this, it is possible that you may also need to make the following changes to the Home Assistant service definition in your docker-compose.yml:

+
    +
  • +

    Add the following mapping to the volumes: clause:

    +
    - /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket
    +
    +
  • +
  • +

    Add the following devices: clause:

    +
    devices:
    +  - "/dev/serial1:/dev/ttyAMA0"
    +  - "/dev/vcio:/dev/vcio"
    +  - "/dev/gpiomem:/dev/gpiomem"
    +
    +
  • +
+

Notes:

+
    +
  • These changes are specific to the Raspberry Pi. If you need Bluetooth support on non-Pi hardware, you will need to figure out the details for your chosen platform.
  • +
  • Historically, /dev/ttyAMA0 meant "the serial interface" on Raspberry Pis. Subsequently, it came to mean "the Bluetooth interface" where Bluetooth support was present. Now, /dev/serial1 is used to mean "the Raspberry Pi's Bluetooth interface". The example above maps that to the internal device /dev/ttyAMA0 because that is probably what the container expects. There are no guarantees and you may need to experiment with internal device names.
  • +
+

HTTPS with a valid SSL certificate

+

Some HA integrations (e.g google assistant) require your HA API to be +accessible via https with a valid certificate. You can configure HA to do this: +docs / +guide +or use a reverse proxy container, as described below.

+

The linuxserver Secure Web Access Gateway container +(swag) (Docker hub +docs) will automatically generate a +SSL-certificate, update the SSL certificate before it expires and act as a +reverse proxy.

+
    +
  1. First test your HA is working correctly: http://raspberrypi.local:8123/ (assuming +your RPi hostname is raspberrypi)
  2. +
  3. Make sure you have duckdns working.
  4. +
  5. On your internet router, forward public port 443 to the RPi port 443
  6. +
  7. +

    Add swag to ~/IOTstack/docker-compose.yml beneath the services:-line:

    +
      swag:
    +    image: ghcr.io/linuxserver/swag
    +    cap_add:
    +      - NET_ADMIN
    +    environment:
    +      - PUID=1000
    +      - PGID=1000
    +      - TZ=${TZ:-Etc/UTC}
    +      - URL=<yourdomain>.duckdns.org
    +      - SUBDOMAINS=wildcard
    +      - VALIDATION=duckdns
    +      - DUCKDNSTOKEN=<token>
    +      - CERTPROVIDER=zerossl
    +      - EMAIL=<e-mail> # required when using zerossl
    +    volumes:
    +      - ./volumes/swag/config:/config
    +    ports:
    +      - 443:443
    +    restart: unless-stopped
    +
    +

    Replace the bracketed values. Do NOT use any "-characters to enclose the values.

    +
  8. +
  9. +

    Start the swag container, this creates the file to be edited in the next step:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d
    +
    +

    Check it starts up OK: docker-compose logs -f swag. It will take a minute or two before it finally logs "Server ready".

    +
  10. +
  11. +

    Enable the reverse proxy for raspberrypi.local (homeassistant.* is already enabled by default) and fix the homeassistant container name ("upstream_app"):

    +
    $ cd ~/IOTstack
    +$ sed -e 's/server_name/server_name *.local/' \
    +  volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf.sample \
    +  > volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf
    +
    +
  12. +
  13. +

    Forward to correct IP when target is a container running in "network_mode: + host" (like Home Assistant does):

    +

    +

    Note: in order for copy-paste to work properly, the usual $-prompts are omitted
    cd ~/IOTstack
    +cat << 'EOF' | sudo tee volumes/swag/config/custom-cont-init.d/add-host.docker.internal.sh
    +#!/bin/sh
    +DOCKER_GW=$(ip route | awk 'NR==1 {print $3}')
    +
    +sed -i -e "s/upstream_app .*/upstream_app ${DOCKER_GW};/" \
    +   /config/nginx/proxy-confs/homeassistant.subdomain.conf
    +EOF
    +sudo chmod u+x volumes/swag/config/custom-cont-init.d/add-host.docker.internal.sh
    +

    +

    (This needs to be copy-pasted/entered as-is, ignore any "> "-prefixes printed +by bash)

    +
  14. +
  15. +

    (optional) Add reverse proxy password protection if you don't want to rely + on the HA login for security, doesn't affect API-access:

    +
    $ cd ~/IOTstack
    +$ sed -i -e 's/#auth_basic/auth_basic/' \
    +    volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf
    +$ docker-compose exec swag htpasswd -c /config/nginx/.htpasswd anyusername
    +
    +
  16. +
  17. +

    Add use_x_forwarded_for and trusted_proxies to your homeassistant http + config. The configuration + file is at volumes/home_assistant/configuration.yaml For a default install + the resulting http-section should be:

    +
    http:
    +   use_x_forwarded_for: true
    +   trusted_proxies:
    +     - 192.168.0.0/16
    +     - 172.16.0.0/12
    +     - 10.77.0.0/16
    +
    +
  18. +
  19. +

    Refresh the stack: cd ~/IOTstack && docker-compose stop && docker-compose + up -d (again may take 1-3 minutes for swag to start if it recreates + certificates)

    +
  20. +
  21. Test homeassistant is still working correctly: + http://raspberrypi.local:8123/ (assuming your RPi hostname is + raspberrypi)
  22. +
  23. +

    Test the reverse proxy https is working correctly: + https://raspberrypi.local/ (browser will issue a warning about wrong + certificate domain, as the certificate is issued for your duckdns-domain, we + are just testing)

    +

    Or from the command line in the RPi:

    +
    $ curl --resolve homeassistant.<yourdomain>.duckdns.org:443:127.0.0.1 \
    +    https://homeassistant.<yourdomain>.duckdns.org/
    +
    +

    (output should end in if (!window.latestJS) { }</script></body></html>)

    +
  24. +
  25. +

    And finally test your router forwards correctly by accessing it from + outside your LAN (e.g. using a mobile phone): + https://homeassistant.<yourdomain>.duckdns.org/ Now the certificate + should work without any warnings.

    +
  26. +
+

about Supervised Home Assistant

+

IOTstack used to offer a menu entry leading to a convenience script that could install Supervised Home Assistant. That script stopped working when Home Assistant changed their approach. The script's author made it clear that script's future was bleak so the affordance was removed from IOTstack.

+

For a time, you could manually install Supervised Home Assistant using their installation instructions for advanced users. Once you got HA working, you could install IOTstack, and the two would (mostly) happily coexist.

+

The direction being taken by the Home Assistant folks is to supply a ready-to-run image for your Raspberry Pi. They still support the installation instructions for advanced users but the requirements are very specific. In particular:

+
+

Debian Linux Debian 11 aka Bullseye (no derivatives)

+
+

Raspberry Pi OS is a Debian derivative and it is becoming increasingly clear that the "no derivatives" part of that requirement must be taken literally and seriously. Recent examples of significant incompatibilities include:

+ +

Because of the self-updating nature of Supervised Home Assistant, your Raspberry Pi might be happily running Supervised Home Assistant plus IOTstack one day, and suddenly start misbehaving the next day, simply because Supervised Home Assistant assumed it was in total control of your Raspberry Pi.

+

If you want Supervised Home Assistant to work, reliably, it really needs to be its own dedicated appliance. If you want IOTstack to work, reliably, it really needs to be kept well away from Supervised Home Assistant. If you want both Supervised Home Assistant and IOTstack, you really need two Raspberry Pis.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Homebridge/index.html b/Containers/Homebridge/index.html new file mode 100644 index 000000000..6753a72e0 --- /dev/null +++ b/Containers/Homebridge/index.html @@ -0,0 +1,2290 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Homebridge - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Homebridge

+

References

+ +

Configuration

+

Homebridge documentation has a comprehensive configuration guide which you are encouraged to read.

+

Homebridge is configured using environment variables. In IOTstack:

+
    +
  • If you are running new menu (master branch, the default), environment variables are kept inline in docker-compose.yml.
  • +
  • +

    If you are running old menu (old-menu branch), environment variables are at the path:

    +
    ~/IOTstack/services/homebridge/homebridge.env
    +
    +
  • +
+

In either case, you apply changes by editing the relevant file (docker-compose.yml or homebridge.env) and then:

+
$ cd ~/IOTstack
+$ docker-compose up -d homebridge
+
+

About "avahi"

+

"avahi", "multicast DNS", "Rendezvous", "Bonjour" and "ZeroConf" are synonyms.

+

Current Homebridge images disable avahi services by default. The Homebridge container runs in "host mode" which means it can participate in multicast traffic flows. If you have a plugin that requires avahi, it can be enabled by setting the environment variable:

+
ENABLE_AVAHI=1
+
+
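Under new menu, for example, that means adding the variable to the environment: clause of the homebridge service definition, along these lines (a sketch, not a complete service definition):
  homebridge:
    environment:
      - ENABLE_AVAHI=1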

Web Interface

+

The web UI for Homebridge can be found on "your_ip":8581. You can change the port by adjusting the environment variable:

+
HOMEBRIDGE_CONFIG_UI_PORT=8581
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Homer/index.html b/Containers/Homer/index.html new file mode 100644 index 000000000..290ead247 --- /dev/null +++ b/Containers/Homer/index.html @@ -0,0 +1,2244 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Homer - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Homer

+

References

+ +

Web Interface

+

The web UI can be found on "your_ip":8881

+

About Homer

+

From the Homer README:

+
+

A dead simple static HOMepage for your servER to keep your services on hand, from a simple yaml configuration file.

+
+

You can find an example of the config.yml file here.

+

Within the context of IOTstack, Homer can help you organize your deployed services.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/InfluxDB/index.html b/Containers/InfluxDB/index.html new file mode 100644 index 000000000..15ecaea48 --- /dev/null +++ b/Containers/InfluxDB/index.html @@ -0,0 +1,3340 @@ + + + + + + + + + + + + + + + + + + + + + + + + + InfluxDB - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

InfluxDB

+

InfluxDB is a time series database. What that means is time is the primary key of each table.

+

Another feature of InfluxDB is the separation of attributes into:

+
    +
  • fields: which are intended to hold variable data (data that is likely to be different in each row, such as a temperature reading from a sensor); and
  • +
  • tags: which are intended to hold metadata (data that is unlikely to be different in each row, such as the name of the sensor).
  • +
+

InfluxDB has configurable aggregation and retention policies allowing measurement resolution reduction, storing all added data points for recent data and only aggregated values for older data.

+

References

+ +

Note:

+
    +
  • IOTstack uses the influxdb:1.8 image. Substituting the :latest tag will get you InfluxDB version 2 and will create a mess.
  • +
+

Configuration

+

All InfluxDB settings can be applied using environment variables. Environment variables override any settings in the InfluxDB configuration file:

+
    +
  • +

    Under "new menu" (master branch), environment variables are stored inline in

    +
    ~/IOTstack/docker-compose.yml
    +
    +
  • +
  • +

    Under "old menu", environment variables are stored in:

    +
    ~/IOTstack/services/influxdb/influxdb.env
    +
    +
  • +
+

Whenever you change an environment variable, you activate it like this:

+
$ cd ~/IOTstack
+$ docker-compose up -d influxdb
+
+

The default service definition provided with IOTstack exposes the following environment variables:

+
    +
  • TZ=Etc/UTC set this to your local timezone. Do not use quote marks!
  • +
  • +

    INFLUXDB_HTTP_FLUX_ENABLED=false set this true if you wish to use Flux queries rather than InfluxQL:

    +
    +

    At the time of writing, Grafana queries use InfluxQL.

    +
    +
  • +
  • +

    INFLUXDB_REPORTING_DISABLED=false InfluxDB activates phone-home reporting by default. This variable disables it for IOTstack. You can activate it if you want your InfluxDB instance to send reports to the InfluxDB developers.

    +
  • +
  • +

    INFLUXDB_MONITOR_STORE_ENABLED=FALSE disables automatic creation of the _internal database. This database stores metrics about InfluxDB itself. The database is incredibly busy. Side-effects of enabling this feature include increased wear and tear on SD cards and, occasionally, driving CPU utilisation through the roof and generally making your IOTstack unstable.

    +
    +

    To state the problem in a nutshell: do you want Influx self-metrics, or do you want a usable IOTstack? You really can't have both. See also issue 19543.

    +
    +
  • +
  • +

    Authentication variables:

    +
      +
    • INFLUXDB_HTTP_AUTH_ENABLED=false
    • +
    • INFLUX_USERNAME=dba
    • +
    • INFLUX_PASSWORD=supremo
    • +
    +

    Misunderstanding the purpose and scope of these variables is a common mistake made by new users. Please do not guess! Please read Authentication before you enable or change any of these variables. In particular, dba and supremo are not defaults for database access.

    +
  • +
  • +

    UDP data acquisition variables:

    +
      +
    • INFLUXDB_UDP_ENABLED=false
    • +
    • INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086
    • +
    • INFLUXDB_UDP_DATABASE=udp
    • +
    +

    Read UDP support before making any decisions on these variables.

    +
  • +
+
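For reference, under new menu those variables live in the environment: clause of the influxdb service definition, along these lines (a sketch repeating the values listed above; adjust to suit):
influxdb:
  environment:
    - TZ=Etc/UTC
    - INFLUXDB_HTTP_FLUX_ENABLED=false
    - INFLUXDB_REPORTING_DISABLED=false
    - INFLUXDB_MONITOR_STORE_ENABLED=FALSE
    - INFLUXDB_HTTP_AUTH_ENABLED=false
    - INFLUX_USERNAME=dba
    - INFLUX_PASSWORD=supremo
    - INFLUXDB_UDP_ENABLED=false
    - INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086
    - INFLUXDB_UDP_DATABASE=udp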

about influxdb.conf

+

A lot of InfluxDB documentation and help material on the web refers to the influxdb.conf configuration file. Such instructions are only appropriate when InfluxDB is installed natively.

+

When InfluxDB runs in a container, changing influxdb.conf is neither necessary nor recommended. Anything that you can do with influxdb.conf can be done with environment variables.

+

However, if you believe that you have a use case that absolutely demands the use of influxdb.conf then you can set it up like this:

+
    +
  1. Make sure the InfluxDB container is running!
  2. +
  3. +

    Execute the following commands:

    +
    $ cd ~/IOTstack
    +$ docker cp influxdb:/etc/influxdb/influxdb.conf .
    +
    +
  4. +
  5. +

    Edit docker-compose.yml, find the influxdb service definition, and add the following line to the volumes: directive:

    +
    - ./volumes/influxdb/config:/etc/influxdb
    +
    +
  6. +
  7. +

    Execute the following commands:

    +
    $ docker-compose up -d influxdb
    +$ sudo mv influxdb.conf ./volumes/influxdb/config/
    +$ docker-compose restart influxdb
    +
    +
  8. +
+

At this point, you can start making changes to:

+
~/IOTstack/volumes/influxdb/config/influxdb.conf
+
+

You can apply changes by sending a restart to the container (as above). However, from time to time you may find that your settings disappear or revert to defaults. Make sure you keep good backups.

+

Connecting to InfluxDB

+

By default, InfluxDB runs in non-host mode and respects the following port-mapping directive in its service definition:

+
ports:
+  - "8086:8086"
+
+

If you are connecting from:

+
    +
  • +

    another container (eg Node-RED or Grafana) that is also running in non-host mode, use:

    +
    http://influxdb:8086
    +
    +

    In this context, 8086 is the internal (right hand side) port number.

    +
  • +
  • +

    either the Raspberry Pi itself or from another container running in host mode, use:

    +
    http://localhost:8086
    +
    +

    In this context, 8086 is the external (left hand side) port number.

    +
  • +
  • +

    a different host, you use either the IP address of the Raspberry Pi or its fully-qualified domain name. Examples:

    +
    http://192.168.1.10:8086
    +http://raspberrypi.local:8086
    +http://iot-hub.mydomain.com:8086
    +
    +

    In this context, 8086 is the external (left hand side) port number.

    +
  • +
+

Interacting with the Influx CLI

+

You can open the influx CLI interactive shell by:

+
$ docker exec -it influxdb influx
+Connected to http://localhost:8086 version 1.8.10
+InfluxDB shell version: 1.8.10
+>
+
+

The command prompt in the CLI is >. While in the CLI you can type commands such as:

+
> help
+> create database MYTESTDATABASE
+> show databases
+> USE MYTESTDATABASE
+> show measurements
+> show series
+> select * from «someMeasurement» where «someCriterion»
+
+

You may also wish to set retention policies on your databases. This is an example of creating a database named "mydb" where any data older than 52 weeks is deleted:

+
> create database mydb
+
+> show retention policies on mydb
+name    duration shardGroupDuration replicaN default
+----    -------- ------------------ -------- -------
+autogen 0s       168h0m0s           1        true
+
+> alter retention policy "autogen" on "mydb" duration 52w shard duration 1w replication 1 default
+
+> show retention policies on mydb
+name    duration  shardGroupDuration replicaN default
+----    --------  ------------------ -------- -------
+autogen 8736h0m0s 168h0m0s           1        true
+
+

To exit the CLI, either press Control+d or type:

+
> exit
+$
+
+

useful alias

+

Consider adding the following alias to your .bashrc:

+
alias influx='docker exec -it influxdb influx -precision=rfc3339'
+
+

With that alias installed, typing influx and pressing return, gets you straight into the influx CLI. The -precision argument tells the influx CLI to display dates in human-readable form. Omitting that argument displays dates as integer nanoseconds since 1970-01-01.

+

Note:

+ +

Authentication

+

warning

+

This tutorial also assumes that you do not have any existing databases so it starts by creating two. One database will be provided with access controls but the other will be left alone so that the behaviour can be compared.

+

However, you need to understand that enabling authentication in InfluxDB is all-or-nothing. If you have any existing InfluxDB databases, you will need to:

+
    +
  • define access rights for all of your databases; and
  • +
  • provide credentials to processes like Node-Red and Grafana that access your databases.
  • +
+

If you do not do this, your existing Node-Red flows, Grafana dashboards and other processes that write to or query your databases will stop working as soon as you activate authentication below.

+

create two test databases

+

Create two databases named "mydatabase1" and "mydatabase2":

+
$ influx
+> CREATE DATABASE "mydatabase1"
+> CREATE DATABASE "mydatabase2"
+
+
+

Typing influx didn't work? See useful alias above.

+
+

define users

+

Define an administrative user. In this example, that user is "dba" (database administrator) with the password "supremo":

+
> CREATE USER "dba" WITH PASSWORD 'supremo' WITH ALL PRIVILEGES
+
+
    +
  • Key point: the mixture of "double" and 'single' quotes is intentional and required.
  • +
+

Define some garden-variety users:

+
> CREATE USER "nodered_user" WITH PASSWORD 'nodered_user_pw'
+> CREATE USER "grafana_user" WITH PASSWORD 'grafana_user_pw'
+
+

You can define any usernames you like. The reason for using "nodered_" and "grafana_" prefixes in these examples is because those are common candidates in an IOTstack environment. The reason for the "_user" suffixes is to make it clear that a username is separate and distinct from a container name.

+

assign access rights

+

The user "dba" already has access to everything but, for all other users, you need to state which database(s) the user can access, and whether that access is:

+
    +
  • READ (aka read-only)
  • +
  • WRITE (aka write-only)
  • +
  • ALL (implies both READ and WRITE)
  • +
+
> GRANT WRITE ON "mydatabase1" TO "nodered_user"
+> GRANT READ ON "mydatabase1" TO "grafana_user"
+
+
    +
  • Key point: you CREATE a user once but you need to GRANT access to every database to which that user needs access.
  • +
+

Once you have finished defining users and assigning access rights, drop out of the influx CLI:

+
> exit
+$
+
+

activate authentication

+

Make sure you read the warning above, then edit the InfluxDB environment variables to enable this key:

+
- INFLUXDB_HTTP_AUTH_ENABLED=true
+
+

Put the change into effect by "upping" the container:

+
$ cd ~/IOTstack
+$ docker-compose up -d influxdb
+
+Recreating influxdb ... done
+
+

The up causes docker-compose to notice that the environment has changed, and to rebuild the container with the new settings.

+
    +
  • +

    Note: You should always wait for 30 seconds after a rebuild for InfluxDB to become available. Any time you see a message like this:

    +
    Failed to connect to http://localhost:8086: Get http://localhost:8086/ping: dial tcp 127.0.0.1:8086: connect: connection refused
    +Please check your connection settings and ensure 'influxd' is running.
    +
    +

    it simply means that you did not wait long enough. Be patient!

    +
  • +
+

experiments

+

Start the influx CLI:

+
$ influx
+
+

Unless you have also set up the INFLUX_USERNAME and INFLUX_PASSWORD environment variables (described later under Authentication Hints), your session will not be authenticated as any user so you will not be able to access either database:

+
> USE mydatabase1
+ERR: unable to parse authentication credentials
+DB does not exist!
+> USE mydatabase2
+ERR: unable to parse authentication credentials
+DB does not exist!
+
+
    +
  • Key point: This is what will happen to any of your pre-existing databases if you enable authentication without a lot of care. You must define users and access rights for all of your databases, and you must provide those credentials to the relevant processes like Node-Red and Grafana.
  • +
+

Authenticate as "nodered_user" and try again:

+
> AUTH
+username: nodered_user
+password: 
+> USE mydatabase1
+Using database mydatabase1
+> USE mydatabase2
+ERR: Database mydatabase2 doesn't exist. Run SHOW DATABASES for a list of existing databases.
+DB does not exist!
+
+

The "nodered_user" can access "mydatabase1" but not "mydatabase2". You will get similar behaviour for the "grafana_user" (try it).

+

Authenticate as the "dba" and try again:

+
> AUTH
+username: dba
+password: 
+> USE mydatabase1
+Using database mydatabase1
+> USE mydatabase2
+Using database mydatabase2
+
+

The super-user can access both databases.

+

To get a list of users:

+
> SHOW USERS
+user         admin
+----         -----
+dba          true
+nodered_user false
+grafana_user false
+
+
    +
  • Key point: you must be authenticated as the "dba" to run SHOW USERS.
  • +
+

To find out what privileges a user has on a database:

+
> SHOW GRANTS FOR "nodered_user"
+database    privilege
+--------    ---------
+mydatabase1 WRITE
+
+
    +
  • Key point: you must be authenticated as the "dba" to run SHOW GRANTS.
  • +
+

To test grants, you can try things like this:

+
> AUTH
+username: nodered_user
+password: 
+> USE "mydatabase1"
+Using database mydatabase1
+> INSERT example somefield=123
+
+

"nodered_user" has WRITE access to "mydatabase1".

+
> SELECT * FROM example
+ERR: error authorizing query: nodered_user not authorized to execute statement 'SELECT * FROM example', requires READ on mydatabase1
+
+

"nodered_user" does not have READ access to "mydatabase1".

+

Authenticate as "grafana_user" and try the query again:

+
> AUTH
+username: grafana_user
+password: 
+> SELECT * FROM example
+name: example
+time                         somefield
+----                         ---------
+2020-09-19T01:41:09.6390883Z 123
+
+

"grafana_user" has READ access to "mydatabase1". Try an insertion as "grafana_user":

+
> INSERT example somefield=456
+ERR: {"error":"\"grafana_user\" user is not authorized to write to database \"mydatabase1\""}
+
+

"grafana_user" does not have WRITE access to "mydatabase1".

+

Change the privileges for "nodered_user" to ALL then try both an insertion and a query. Note that changing privileges requires first authenticating as "dba":

+
> AUTH
+username: dba
+password: 
+> GRANT ALL ON "mydatabase1" TO "nodered_user"
+> AUTH
+username: nodered_user
+password: 
+> INSERT example somefield=456
+> SELECT * FROM example
+name: example
+time                          somefield
+----                          ---------
+2020-09-19T01:41:09.6390883Z  123
+2020-09-19T01:42:36.85766382Z 456
+
+

"nodered_user" has both READ and WRITE access to "mydatabase1".

+

notes

+
    +
  1. +

    Some inferences to draw from the above:

    +
      +
    • user definitions are global rather than per-database. Grants are what tie users to particular databases.
    • +
    • setting INFLUXDB_HTTP_AUTH_ENABLED=true is how authentication is activated and enforced. If it is false, all enforcement goes away (a handy thing to know if you lose passwords or need to recover from a mess).
    • +
    • as the "HTTP" in INFLUXDB_HTTP_AUTH_ENABLED suggests, it applies to access via HTTP. This includes the influx CLI and processes like Node-Red and Grafana.
    • +
    +
  2. +
  3. +

    Always keep in mind that the InfluxDB log is your friend:

    +
    $ docker logs influxdb
    +
    +
  4. +
+

hints

+

After you enable authentication, there are a couple of ways of speeding-up your daily activities. You can pass the dba username and password on the end of the influx alias:

+
$ influx -database mydatabase1 -username dba -password supremo
+
+

but this is probably sub-optimal because of the temptation to hard-code your dba password into scripts. An alternative is to enable these environment variables:

+
- INFLUX_USERNAME=dba
+- INFLUX_PASSWORD=supremo
+
+

and then "up" the container as explained above to apply the changes.

+

Misunderstandings about the scope and purpose of INFLUX_USERNAME and INFLUX_PASSWORD are quite common so make sure you realise that the variables:

+
    +
  • do not "set" any username or password within InfluxDB;
  • +
  • only apply to starting the influx CLI – they are just synonyms for the -username and -password parameters on the influx CLI command; and
  • +
  • are not some kind of general-access credentials that apply to everything. They will not work from Node-RED or Grafana!
  • +
+

In other words, with INFLUX_USERNAME and INFLUX_PASSWORD added to the environment, the following two commands are identical:

+
$ influx -database mydatabase1 -username dba -password supremo
+$ influx -database mydatabase1
+
+

The INFLUX_USERNAME and INFLUX_PASSWORD variables also work if you start a shell into the InfluxDB container and then invoke the influx CLI from there:

+
$ docker exec -it influxdb bash
+# influx
+>
+
+

That is all the INFLUX_USERNAME and INFLUX_PASSWORD variables do.

+

cleaning up

+

To undo the steps in this tutorial, first set INFLUXDB_HTTP_AUTH_ENABLED=false and then "up" influxdb. Then:

+
$ influx
+> DROP USER "dba"
+> DROP USER "nodered_user"
+> DROP USER "grafana_user"
+> DROP DATABASE "mydatabase1"
+> DROP DATABASE "mydatabase2"
+> exit
+
+

UDP support

+

Assumptions:

+
    +
  • you want to enable UDP support; and
  • +
  • your goal is to log traffic arriving on UDP port 8086 into an InfluxDB database named "udp".
  • +
+

aliases

+

This tutorial uses the following aliases:

+
    +
  • influx - explained earlier - see useful alias.
  • +
  • +

    DPS which is the equivalent of:

    +
    $ docker ps --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}"
    +
    +

    The focus is: what containers are running?

    +
  • +
  • +

    DNET which is the equivalent of:

    +
    $ docker ps --format "table {{.Names}}\t{{.Ports}}"
    +
    +

    The focus is: what ports are containers using?

    +
    +

    Any container where no ports are listed is either exposing no ports and/or is running in host mode.

    +
    +
  • +
+

Although both DPS & DNET invoke docker ps, the formatting means the output usually fits on your screen without line wrapping.

+

All three aliases are installed by IOTstackAliases.

+

confirm that UDP is not enabled

+
$ DNET
+NAMES      PORTS
+influxdb   0.0.0.0:8086->8086/tcp
+
+

Interpretation: Docker is listening on TCP port 8086, and is routing the traffic to the same port on the influxdb container. There is no mention of UDP.

+

create a database to receive the traffic

+

This tutorial uses the database name of "udp".

+
$ influx
+> create database udp
+> exit
+$
+
+

define a UDP port mapping

+

Edit docker-compose.yml to define a UDP port mapping (the second line in the ports grouping below):

+
influxdb:
+  
+  ports:
+    - "8086:8086"
+    - "8086:8086/udp"
+  
+
+

enable UDP support

+

Edit your docker-compose.yml and change the InfluxDB environment variables to glue it all together:

+
environment:
+  - INFLUXDB_UDP_DATABASE=udp
+  - INFLUXDB_UDP_ENABLED=true
+  - INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086
+
+

In this context, the IP address "0.0.0.0" means "this host" (analogous to the way "255.255.255.255" means "all hosts").

+

rebuild the container

+
$ cd ~/IOTstack
+$ docker-compose up -d influxdb
+
+Recreating influxdb ... done
+
+

The up causes docker-compose to notice that the environment has changed, and to rebuild the container with the new settings.

+

confirm that UDP is enabled

+
$ DNET
+NAMES      PORTS
+influxdb   0.0.0.0:8086->8086/tcp, 0.0.0.0:8086->8086/udp
+
+

Interpretation: In addition to the TCP port, Docker is now listening on UDP port 8086, and is routing the traffic to the same port on the influxdb container.

+

check your work

+

Check the log:

+
$ docker logs influxdb
+
+

If you see a line like this:

+
ts=2020-09-18T03:09:26.154478Z lvl=info msg="Started listening on UDP" log_id=0PJnqbK0000 service=udp addr=0.0.0.0:8086
+
+

then everything is probably working correctly. If you see anything that looks like an error message then you will need to follow your nose.

+

start sending traffic

+

Although the how-to is beyond the scope of this tutorial, you will need a process that can send "line format" payloads to InfluxDB using UDP port 8086.

+
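For a quick one-off sanity check from the Raspberry Pi itself, bash can push a single line-protocol point over UDP. This is a throwaway test, not a data feeder, and the measurement name is arbitrary:
$ echo "udptest value=1" > /dev/udp/127.0.0.1/8086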

Once that is set up, you can inspect the results like this:

+
$ influx -database udp
+> show measurements
+
+

If data is being received, you will get at least one measurement name. An empty list implies no data is being received.

+

If you get at least one measurement name then you can inspect the data using:

+
> select * from «measurement»
+
+

where «measurement» is one of the names in the show measurements list.

+

Reducing flash wear-out

+

SSD-drives have pretty good controllers spreading out writes, so this isn't really a concern for them. But if you store data on an SD-card, flash wear may cause the card to fail prematurely. Flash memory has a limited number of erase-write cycles per physical block. These blocks may be multiple megabytes. You can use sudo lsblk -D to see how big the erase granularity is on your card. The goal is to avoid writing lots of small changes targeting the same physical blocks. Here are some tips to mitigate SD-card wear:

+
    +
  • Don't use short retention policies. This may mask heavy disk IO without increasing disk space usage. Depending on the flash card and file system used, new data may be re-written to the same blocks that were freed by the expiration, wearing them out.
  • +
  • Take care not to add measurements too often, if possible no more often than once a minute, and add all measurements in one operation (see the example below). Even a small write will physically write a whole new block and erase the previously used block.
  • +
  • Adding measurements directly to Influxdb will cause a write on every operation. If your client code can't aggregate multiple measurements into one write, consider routing them via Telegraf. It has the flush_interval-option, which will combine the measurements into one write.
  • +
  • All InfluxDB queries are logged by default and logs are written to the SD-card. To disable this, add into docker-compose.yml, next to the other INFLUXDB_* entries:
  • +
+
    - INFLUXDB_DATA_QUERY_LOG_ENABLED=false
+    - INFLUXDB_HTTP_LOG_ENABLED=false
+
+

This is especially important if you plan on having Grafana or Chronograf displaying up-to-date data on a dashboard, making queries all the time.

+
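Returning to the batching advice above: the InfluxDB 1.8 HTTP API accepts several points in a single request, so one write per interval is enough. A hypothetical example (database mydb and made-up measurement names; run from the Raspberry Pi itself):
$ curl -i -XPOST 'http://localhost:8086/write?db=mydb' --data-binary \
'kitchen_temperature value=21.5
kitchen_humidity value=55'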

Debugging

+

Container won't start

+

Sometimes you need to start the container without starting InfluxDB in order to access its maintenance tools, usually when InfluxDB crashes on startup.

+

Add a new line below influxdb: to your docker-compose.yml:

+
influxdb:
+  
+  entrypoint: sleep infinity
+
+

Recreate the container using the new entrypoint:

+
$ docker-compose up -d influxdb
+Recreating influxdb ... done
+
+

Now the container should start and you can get a shell to poke around and try the influx_inspect command:

+
$ docker exec -it influxdb bash
+# influx_inspect
+Usage: influx_inspect [[command] [arguments]]
+
+

Once you have finished poking around, you should undo the change by removing the custom entrypoint and up -d again to return to normal container behaviour where you can then test to see if your fixes worked.

+

Adding packages

+

The container is pretty bare-bones by default. It is OK to install additional tools. Start by running:

+
# apt update
+
+

and then use apt install to add whatever you need. Packages you add will persist until the next time the container is re-created.
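For example, to add a text editor and the standard process utilities (these particular packages are only illustrations; install whatever suits you):

# apt install -y nano procps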

+

Sniffing traffic

+

If you need to see the actual packets being sent to InfluxDB for insertion into your database, you can set it up like this:

+
$ docker exec influxdb bash -c 'apt update && apt install tcpdump -y'
+
+

That adds tcpdump to the running container and, as noted above, that will persist until you re-create the container.

+

To capture traffic:

+
$ docker exec influxdb tcpdump -i eth0 -s 0 -n -c 100 -w /var/lib/influxdb/capture.pcap dst port 8086
+
+

Breaking that down:

+
    +
  • -i eth0 is the container's internal virtual Ethernet network interface (attached to the internal bridged network)
  • +
  • -s 0 means "capture entire packets"
  • +
  • -n means "do not try to resolve IP addresses to domain names"
  • +
  • -c 100 is optional and means "capture 100 packets then stop". If you omit this option, tcpdump will capture packets until you press control+C.
  • +
  • -w /var/lib/influxdb/capture.pcap is the internal path to the file where captured packets are written. You can, of course, substitute any filename you like for capture.pcap.
  • +
  • dst port 8086 captures all packets where the destination port field is 8086, which is the InfluxDB internal port number.
  • +
+

The internal path:

+
/var/lib/influxdb/capture.pcap
+
+

maps to the external path:

+
~/IOTstack/volumes/influxdb/data/capture.pcap
+
+

You can copy that file to another system where you have a tool like Wireshark installed. Wireshark will open the file and you can inspect packets and verify that the information being sent to InfluxDB is what you expect.
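One way to get the capture onto the other system is scp. This is a sketch only: it assumes your Raspberry Pi answers to raspberrypi.local, that you log in as user pi, and that the command is run from the other system. If the capture file is not readable by pi, adjust its permissions on the Pi first (eg sudo chmod 644 capture.pcap):

$ scp pi@raspberrypi.local:IOTstack/volumes/influxdb/data/capture.pcap .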

+

Do not forget to clean-up any packet capture files:

+
$ cd ~/IOTstack/volumes/influxdb/data
+$ sudo rm capture.pcap
+
\ No newline at end of file
diff --git a/Containers/InfluxDB2/index.html b/Containers/InfluxDB2/index.html
new file mode 100644
index 000000000..ce96bba4b
--- /dev/null
+++ b/Containers/InfluxDB2/index.html
@@ -0,0 +1,3155 @@
+ InfluxDB 2 - IOTstack

InfluxDB 2

+

references

+ +

assumptions

+
    +
  1. +

    Your Raspberry Pi is running full 64-bit Raspberry Pi OS Debian GNU/Linux 11 (bullseye).

    +
      +
    • DockerHub does not have a 32-bit image for InfluxDB 2 so you can't run this container until you have upgraded.
    • +
    • Running full 64-bit is not the same as enabling the 64-bit kernel in /boot/config.txt. User-mode needs to be 64-bit capable as well. You must start from a full 64-bit image.
    • +
    +
  2. +
  3. +

    Node-RED is your principal mechanism for feeding data to InfluxDB 1.8.

    +
      +
    • You may have other services feeding data to InfluxDB 1.8 (eg Telegraf). The steps documented here will migrate all your existing data but do not discuss how to adapt services other than Node-RED to feed new data to InfluxDB 2.
    • +
    +
  4. +
  5. +

    Grafana is your principal mechanism for creating dashboards based on data stored in InfluxDB 1.8.

    +
      +
    • You may have other visualisation tools. You may gain insights from studying how Grafana needs to be changed to run Flux queries against InfluxDB 2 buckets but this documentation does not explore alternatives.
    • +
    +
  6. +
  7. +

    Node-RED, InfluxDB 1.8 and Grafana are all running in non-host mode on the same Docker instance, and that it is your intention to deploy InfluxDB 2 in non-host mode as well.

    +
      +
    • If you are running any containers in host mode or have distributed the services across multiple Docker instances, you will have to adapt appropriately.
    • +
    +
  8. +
+

terminology: database vs bucket

+

InfluxDB 1.8 and InfluxDB 2 are both database management systems (DBMS), sometimes referred to as "engines", optimised for storage and retrieval of time-series data. InfluxDB 1.8 uses the term database to mean a collection of measurements. InfluxDB 2 uses the term bucket to mean the same thing.

+

When an InfluxDB 1.8 database is migrated, it becomes an InfluxDB 2 bucket. You will see this change in terminology in various places, such as the InfluxDB-out node in Node-RED. When that node is set to:

+
    +
  • +

    Version 1.x, the user interface has a "Database" field which travels with the connection. For example:

    +
      +
    • [v1.x] influxdb:8086/power (set up in the connection sheet)
    • +
    +

    This implies that you need one connection per database.

    +
  • +
  • +

    Version 2.0, the user interface has a "Bucket" field which is independent of the connection. For example:

    +
      +
    • [v2.0] influxdb2:8086 (set up in the connection sheet)
    • +
    • Bucket: power/autogen (set up in the node)
    • +
    +

    This implies that you need one connection per engine. It is a subtle but important difference.

    +
  • +
+

reference service definition

+

The InfluxDB 2 service definition is added to your compose file by the IOTstack menu.

+
influxdb2:
+  container_name: influxdb2
+  image: "influxdb:latest"
+  restart: unless-stopped
+  environment:
+    - TZ=Etc/UTC
+    - DOCKER_INFLUXDB_INIT_USERNAME=me
+    - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword
+    - DOCKER_INFLUXDB_INIT_ORG=myorg
+    - DOCKER_INFLUXDB_INIT_BUCKET=mybucket
+    - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
+    - DOCKER_INFLUXDB_INIT_MODE=setup
+  # - DOCKER_INFLUXDB_INIT_MODE=upgrade
+  ports:
+    - "8087:8086"
+  volumes:
+    - ./volumes/influxdb2/data:/var/lib/influxdb2
+    - ./volumes/influxdb2/config:/etc/influxdb2
+    - ./volumes/influxdb2/backup:/var/lib/backup
+  # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro
+  healthcheck:
+    test: ["CMD", "influx", "ping"]
+    interval: 30s
+    timeout: 10s
+    retries: 3
+    start_period: 30s
+
+

As an alternative to using the menu, you can copy and paste the service definition into your compose file from the template at:

+
~/IOTstack/.templates/influxdb2/service.yml
+
+

required edits

+

Edit the service definition in your compose file to change the following variables:

+
    +
  • +

    TZ=«country»/«city»

    +
  • +
  • +

    DOCKER_INFLUXDB_INIT_USERNAME=«username»

    +

    This name becomes the administrative user. It is associated with your «password» and «token».

    +
  • +
  • +

    DOCKER_INFLUXDB_INIT_PASSWORD=«password»

    +

    Your «username» and «password» form your login credentials when you administer InfluxDB 2 using its web-based graphical user interface. The strength of your password is up to you.

    +
  • +
  • +

    DOCKER_INFLUXDB_INIT_ORG=«organisation»

    +

    An organisation name is required. Examples:

    +
      +
    • myorg
    • +
    • my-house
    • +
    • com.mydomain.myhouse
    • +
    +
  • +
  • +

    DOCKER_INFLUXDB_INIT_BUCKET=«bucket»

    +

    A default bucket name is required. The name does not matter because you won't actually be using it so you can accept the default of "mybucket". You can delete the unused bucket later if you want to be tidy.

    +
  • +
  • +

    DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=«token»

    +

    Although you can let InfluxDB 2 generate your access token for you, it will keep things simple if you generate your own. Here are some possible approaches:

    +
      +
    1. +

      use a universally-unique ID:

      +
      $ uuidgen
      +4fef85b4-2f56-480f-b143-fa5cb6e8f18a
      +
      +
    2. +
    3. +

      use GnuPG to generate a random string:

      +
      $ gpg --gen-random -a 0 25
      +bYS3EsnnY0AlRxJ2uk44Hzwm7GMKYu5unw==
      +
      +
    4. +
    5. +

      use a password-generator of your choosing.

      +
    6. +
    +
  • +
+

Note:

+
    +
  • Unless a container's documentation explicitly states that it is supported, you should never use quote marks to encapsulate the values you supply via environment variables. InfluxDB 2 treats quotes as being part of the value (eg a password of "fred" is the 6-character string that includes the quotes). If you put quote marks around anything as you were editing, please go back and remove them.
  • +
+

Table 1: mode-specific directives

+

InfluxDB 2 operates in three distinct modes which are controlled by the DOCKER_INFLUXDB_INIT_MODE environment variable. The table below summarises the environment variables and volume mappings that need to be active in each mode.

+

Table 1

+

initialising InfluxDB 2

+

If you have only just included the template service definition in your compose file and performed the required edits, then you can follow the initialisation process below.

+

However, if you want to re-initialise the container, go to re-initialising InfluxDB 2.

+

To initialise InfluxDB 2:

+
    +
  1. Confirm that the service definition directives are set as per the "setup" column of Table 1.
  2. +
  3. +

    Be in the correct directory (assumed throughout):

    +
    $ cd ~/IOTstack
    +
    +
  4. +
  5. +

    Start the InfluxDB 2 container:

    +
    $ docker-compose up -d influxdb2
    +
    +
  6. +
  7. +

    InfluxDB 2 will notice the following environment variable:

    +
    DOCKER_INFLUXDB_INIT_MODE=setup
    +
    +

    This instructs the container to initialise the database engine structures based on a combination of defaults and the values you provide via the other environment variables.

    +
  8. +
  9. +

    Confirm that the InfluxDB 2 container is not in a restart loop and isn't reporting errors by using commands like:

    +
    $ docker ps
    +$ docker logs influxdb2
    +
    +
  10. +
+

If you don't need to migrate any data from InfluxDB 1.8 you can go straight to running InfluxDB 2, otherwise follow the data-migration procedure instructions below.

+

data-migration procedure

+

Successful migration depends on the following assumptions being true:

+
    +
  • The InfluxDB 2 container is running and has just been initialised as per initialising InfluxDB 2.
  • +
  • +

    The InfluxDB 1.8 container is running, and is based on the IOTstack service definition (or reasonable facsimile) at:

    +
    ~/IOTstack/.templates/influxdb/service.yml
    +
    +
  • +
+

To migrate your InfluxDB 1.8 data:

+
    +
  1. +

    Be in the correct directory (assumed throughout):

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    InfluxDB 1.8 runs as root and its persistent store is owned by root but not all files and folders in the persistent store are group or world readable. InfluxDB 2 runs as user ID 1000 (user "influxdb" inside the container). Because of this, you need to give InfluxDB 2 permission to read the InfluxDB 1.8 persistent store.

    +

    It is not a good idea to interfere with a persistent store while a container is running so best practice is to stop InfluxDB 1.8 for long enough to make a copy of its persistent store:

    +
    $ sudo rm -rf ./volumes/influxdb.migrate
    +$ docker-compose down influxdb
    +$ sudo cp -a ./volumes/influxdb ./volumes/influxdb.migrate
    +$ docker-compose up -d influxdb
    +$ sudo chown -R 1000:1000 ./volumes/influxdb.migrate/data
    +
    +
    +

    see also if downing a container doesn't work

    +
    +

    In words:

    +
      +
    1. Ensure any previous attempts at migration are removed. Always be extremely careful with any sudo rm command. Check your work before you press return.
    2. +
    3. Stop InfluxDB 1.8.
    4. +
    5. Make a copy of the InfluxDB 1.8 persistent store.
    6. +
    7. Start InfluxDB 1.8 again.
    8. +
    9. Change ownership of the copy of the InfluxDB 1.8 persistent store.
    10. +
    +
  4. +
  5. +

    Edit your compose file as per the "upgrade" column of Table 1. The changes you need to make are:

    +
      +
    1. +

      Change the initialisation mode from setup to upgrade:

      +
        +
      • +

        before editing:

        +
        12
        +13
            - DOCKER_INFLUXDB_INIT_MODE=setup
        +  # - DOCKER_INFLUXDB_INIT_MODE=upgrade
        +
        +
      • +
      • +

        after editing:

        +
        12
        +13
          # - DOCKER_INFLUXDB_INIT_MODE=setup
        +    - DOCKER_INFLUXDB_INIT_MODE=upgrade
        +
        +
      • +
      +
    2. +
    3. +

      Activate the volume mapping to give InfluxDB 2 read-only access to the copy of the InfluxDB 1.8 persistent store that you made in step 2:

      +
        +
      • +

        before editing:

        +
        20
          # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro
        +
        +
      • +
      • +

        after editing:

        +
        20
            - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro
        +
        +
      • +
      +
    4. +
    +

    Save your work but do not execute any docker-compose commands.

    +
  6. +
  7. +

    InfluxDB 2 creates a "bolt" (lock) file to prevent accidental data-migrations. That file needs to be removed:

    +
    $ rm ./volumes/influxdb2/data/influxd.bolt
    +
    +
  8. +
  9. +

    The InfluxDB 2 container is still running. The following command causes the container to be recreated with the edits you made in step 3:

    +
    $ docker-compose up -d influxdb2
    +
    +
  10. +
  11. +

    InfluxDB 2 will notice the following environment variable:

    +
    DOCKER_INFLUXDB_INIT_MODE=upgrade
    +
    +

    This, combined with the absence of the "bolt" file, starts the migration process. You need to wait until the migration is complete. The simplest way to do that is to watch the size of the persistent store for InfluxDB 2 until it stops increasing. Experience suggests that the InfluxDB 2 persistent store will usually be a bit larger than InfluxDB 1.8. For example:

    +
      +
    • +

      reference size for an InfluxDB 1.8 installation:

      +
      $ sudo du -sh ./volumes/influxdb
      +633M    ./volumes/influxdb
      +
      +
    • +
    • +

      final size after migration to InfluxDB 2:

      +
      $ sudo du -sh ./volumes/influxdb2
      +721M    ./volumes/influxdb2
      +
      +
    • +
    +
  12. +
  13. +

    Data migration is complete once the folder size stops changing (see the sketch after this list for a convenient way to watch it).

    +
  14. +
+
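As foreshadowed in the list above, a convenient way to watch the folder size is to re-run du automatically every few seconds. This is a sketch only; it assumes the watch utility is available (it is part of a standard Raspberry Pi OS install) and that sudo does not prompt for a password (the default for user pi):

$ watch -n 10 sudo du -sh ./volumes/influxdb2

Press control+C to stop watching once the reported size settles.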

Proceed to running InfluxDB 2 below.

+

running InfluxDB 2

+

The container now needs to be instructed to run in normal mode.

+
    +
  1. +

    Be in the correct directory (assumed throughout):

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    Edit your compose file as per the "(omitted)" column of Table 1. The changes are:

    +
      +
    1. +

      Deactivate all DOCKER_INFLUXDB_INIT_ environment variables. After editing, the relevant lines should look like:

      +
       7
      + 8
      + 9
      +10
      +11
      +12
      +13
        # - DOCKER_INFLUXDB_INIT_USERNAME=me
      +  # - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword
      +  # - DOCKER_INFLUXDB_INIT_ORG=myorg
      +  # - DOCKER_INFLUXDB_INIT_BUCKET=mybucket
      +  # - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
      +  # - DOCKER_INFLUXDB_INIT_MODE=setup
      +  # - DOCKER_INFLUXDB_INIT_MODE=upgrade
      +
      +
    2. +
    3. +

      Deactivate the volume mapping if it is active. After editing, the line should look like:

      +
      20
        # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro
      +
      +
    4. +
    +

    Save your work.

    +
  4. +
  5. +

    The InfluxDB 2 container is still running. The following command causes the container to be recreated with the edits you have just made:

    +
    $ docker-compose up -d influxdb2
    +
    +

    The absence of an active DOCKER_INFLUXDB_INIT_MODE variable places InfluxDB 2 into normal run mode.

    +
  6. +
  7. +

    If you have just performed a data migration, you can remove the copy of the InfluxDB 1.8 persistent store:

    +
    $ sudo rm -rf ./volumes/influxdb.migrate
    +
    +
    +

    always be extremely careful with any sudo rm command. Always check your work before you press return.

    +
    +
  8. +
+

re-initialising InfluxDB 2

+

If you need to start over from a clean slate:

+
    +
  1. +

    Be in the correct directory (assumed throughout):

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    Terminate the InfluxDB 2 container:

    +
    $ docker-compose down influxdb2
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  4. +
  5. +

    Remove the persistent store:

    +
    $ sudo rm -rf ./volumes/influxdb2
    +
    +
    +

    always be extremely careful with any sudo rm command. Always check your work before you press return.

    +
    +
  6. +
  7. +

    Edit your compose file as per the "setup" column of Table 1. After editing, the relevant lines should look like this:

    +
     7
    + 8
    + 9
    +10
    +11
    +12
    +13
        - DOCKER_INFLUXDB_INIT_USERNAME=me
    +    - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword
    +    - DOCKER_INFLUXDB_INIT_ORG=myorg
    +    - DOCKER_INFLUXDB_INIT_BUCKET=mybucket
    +    - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token
    +    - DOCKER_INFLUXDB_INIT_MODE=setup
    +  # - DOCKER_INFLUXDB_INIT_MODE=upgrade
    +
    +
  8. +
+

Go to initialising InfluxDB 2.

+

exploring InfluxDB 2 data

+

browse data

+
    +
  1. +

    Launch a browser and connect it to port 8087 on your Raspberry Pi. For example:

    +
    http://raspberrypi.local:8087
    +
    +

    You can also use the IP address or domain name of your Raspberry Pi. In this context, 8087 is the external port number from the left hand side of the port mapping in the service definition:

    +
    14
    +15
      ports:
    +    - "8087:8086"
    +
    +
  2. +
  3. +

    Sign in to the InfluxDB 2 instance using your «username» and «password».

    +
  4. +
  5. +

    Click on "Explore" in the left-hand tool strip. That is marked [A] in the screen shot. In the area marked [B] you should be able to see a list of the buckets that were migrated from InfluxDB 1.8 databases.

    +

    In the screen shot, I clicked on other fields to create a query:

    +
      +
    • In area [B], I selected the "power/autogen" bucket;
    • +
    • In area [C], I selected the "hiking2" (electricity meter) measurement;
    • +
    • In area [D], I selected the "voltage" field;
    • +
    • The bucket in this test is a migrated copy of an InfluxDB 1.8 database. It was not ingesting live data so I also needed to change the duration popup menu [E] to a time-span that included the most-recent insertions;
    • +
    • Then I clicked the "Submit" button [F]; and
    • +
    • The result was the graph in [G].
    • +
    +
  6. +
+

You can explore your own tables using similar techniques.

+

Flux queries via point-and-click

+

Grafana does not (yet) seem to have the ability to let you build Flux queries via point-and-click like you can with InfluxQL queries. Until Grafana gains that ability, it's probably a good idea to learn how to build Flux queries in InfluxDB, so you can copy-and-paste the Flux statements into Grafana.

+

Once you have constructed a query in the "Query Builder", click the "Script Editor" button [H] to switch to the editor view.

+

For this example, the query text is:

+
from(bucket: "power/autogen")
+  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
+  |> filter(fn: (r) => r["_measurement"] == "hiking2")
+  |> filter(fn: (r) => r["_field"] == "voltage")
+  |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
+  |> yield(name: "mean")
+
+

Two important things to note here are:

+
    +
  1. The bucket name: power/autogen; and
  2. +
  3. The measurement name: hiking2.
  4. +
+

example: adapting Node-RED

+

Node-RED flow models

+
    +
  1. +

    Assume you have an existing flow (eg a fairly standard 3-node flow) which is logging to an InfluxDB 1.8 database. Your goal is to modify the flow to log the same data to the recently-migrated InfluxDB 2 bucket.

    +
  2. +
  3. +

    Start Node-RED if it is not running:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d nodered
    +
    +
  4. +
  5. +

    Use a web browser to connect to your Node-RED instance.

    +
  6. +
  7. +

    Drag a new InfluxDB-out node onto the canvas:

    +
      +
    • This is exactly the same InfluxDB-out node that you have been using to write to your InfluxDB 1.8 databases. There isn't a different node or package for InfluxDB 2.
    • +
    • Always drag a new InfluxDB-out node from the palette onto the canvas. Do not make the mistake of re-using an existing InfluxDB-out node (eg via copy and paste) because that is a very good way of breaking your flows.
    • +
    +
  8. +
  9. +

    Double-click the InfluxDB-out node to open it:

    +

    InfluxDB-out node

    +
      +
    • At [A], give the node a sensible name.
    • +
    • +

      Click the pencil icon [B] adjacent to the Server field:

      +
        +
      • Leave the Name field [C] blank. This ensures that the title in the popup menu [D] automatically reflects the version and connection URL.
      • +
      • Change the Version popup menu [E] to "2.0".
      • +
      • +

        Set the URL [F] to point to your InfluxDB 2 instance:

        +
        http://influxdb2:8086
        +
        +
        +

        In this context, "influxdb2" is the container name and 8086 is the container's internal port. Node-RED communicates with InfluxDB 2 across the internal bridged network (see assumptions).

        +
        +
      • +
      • +

        Paste your «token» into the Token field [G].

        +
      • +
      • Click "Update" [H].
      • +
      +
    • +
    • +

      Set the Organisation field [I] to your «organisation».

      +
    • +
    • +

      Set the Bucket [J] to the correct value. You can get that from either:

      +
        +
      • area [B] in the Influx Explorer screen shot; or
      • +
      • the bucket name from the saved Flux query.
      • +
      +

      In this example, the bucket name is "power/autogen".

      +
    • +
    • +

      Set the Measurement [K] to the measurement name. You can get that from either:

      +
        +
      • area [C] in the Influx Explorer screen shot; or
      • +
      • the measurement name from the saved Flux query.
      • +
      +

      In this example, the measurement name is "hiking2".

      +
    • +
    • +

      Click Done [L].

      +
    • +
    +
  10. +
  11. +

    Connect the outlet of the Change node to the inlet of the InfluxDB-out node.

    +
  12. +
  13. Click Deploy.
  14. +
  15. Watch the debug panel to make sure no errors are being reported.
  16. +
  17. +

    Go back to the InfluxDB 2 Data Explorer and click the refresh button "I". If everything has gone according to plan, you should see recent observations added to your graph.

    +
    +

    You may need to wait until your sensor has sent new data.

    +
    +
  18. +
+

example: adapting Grafana

+

defining an InfluxDB 2 data source

+
    +
  1. +

    Start Grafana if it is not running:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d grafana
    +
    +
  2. +
  3. +

    Use a web browser to connect to your Grafana instance and login as an administrator.

    +
  4. +
  5. Hover your mouse over the "gear" icon in the tool-strip on the left hand side, and choose "Data sources".
  6. +
  7. Click the "Add data source" button.
  8. +
  9. Select the "InfluxDB" option.
  10. +
  11. +

    Configure as follows:

    +

    Grafana config DB source

    +
      +
    • Change the Name [A] to a meaningful title that reflects the bucket you are going to query. For example, "InfluxDB 2.0 power".
    • +
    • +

      Change the Query Language popup menu [B] to "Flux".

      +
      +

      Ignore the advice about Flux support being in beta.

      +
      +
    • +
    • +

      Change the URL [C] to point to your InfluxDB 2 instance:

      +
      http://influxdb2:8086
      +
      +
      +

      In this context, "influxdb2" is the container name and 8086 is the container's internal port. Grafana communicates with InfluxDB 2 across the internal bridged network (see assumptions).

      +
      +
    • +
    • +

      Turn off all the switches in the "Auth" group [D].

      +
    • +
    • Set the Organisation [E] to your «organisation».
    • +
    • +

      Paste your «token» into the Token field [F].

      +
      +

      ignore the fact that the prompt text says "password" - you need the token!

      +
      +
    • +
    • +

      Set the Default Bucket [G] to the bucket (database) you want to query. You can get that from either:

      +
        +
      • area [B] in the Influx Explorer screen shot; or
      • +
      • the bucket name from the saved Flux query.
      • +
      +

      In this example, the value is "power/autogen".

      +
    • +
    • +

      Click Save & Test [H].

      +
    • +
    +
  12. +
+

using an InfluxDB 2 data source in a dashboard

+
    +
  1. Find the + icon in the tool-strip on the left hand side, hover your mouse over it and choose "Create » dashboard".
  2. +
  3. Click "Add a new panel".
  4. +
  5. Change the "Data source" popup to the bucket connection you created earlier ("InfluxDB 2.2 power").
  6. +
  7. The editor automatically switches into Flux mode.
  8. +
  9. Paste the query text you saved earlier from the InfluxDB 2 query inspector.
  10. +
  11. If necessary, change the duration to a period that is likely to contain some data to display.
  12. +
  13. Click the Refresh button.
  14. +
  15. Click Apply.
  16. +
+

In the side-by-side screen shots below, observations before the straight-line (missing data) segment were imported from InfluxDB 1.8 while observations after the straight-line segment were inserted by the new InfluxDB-out node in Node-RED.

+

compare results

+

odds and ends

+
    +
  1. +

    Forgot your token:

    +
    $ docker exec influxdb2 influx auth ls
    +
    +
  2. +
  3. +

    Create a new user, password and token:

    +
    $ docker exec influxdb2 influx user create --name «username» --password «password»
    +$ docker exec influxdb2 influx auth create --user «username» --all-access
    +
    +
  4. +
  5. +

    List available buckets:

    +
    $ docker exec influxdb2 influx bucket ls
    +
    +
  6. +
  7. +

    Delete the default «bucket»:

    +
    $ docker exec influxdb2 influx bucket delete --org «organisation» --name «bucket»
    +
    +
  8. +
+

migration strategy

+

From the fact that both InfluxDB 1.8 and InfluxDB 2 can run in parallel, with Node-RED feeding the same data to both, it should be self-evident that you can repeat the data-migration as often as necessary, simply by starting from re-initialising InfluxDB 2.

+

This implies that you can concentrate on one database at a time, adjusting Node-RED so that it writes each row of sensor data to both the InfluxDB 1.8 database and corresponding InfluxDB 2 bucket.

+

Having the data going to both engines means you can take your time adjusting your Grafana dashboards to be based on Flux queries. You can either retrofit InfluxDB 2 bucket sources and Flux queries to existing dashboards, or build parallel dashboards from the ground up.

\ No newline at end of file
diff --git a/Containers/Kapacitor/index.html b/Containers/Kapacitor/index.html
new file mode 100644
index 000000000..458ca86d7
--- /dev/null
+++ b/Containers/Kapacitor/index.html
@@ -0,0 +1,2287 @@
+ Kapacitor - IOTstack

Kapacitor

+

References

+ +

Upgrading Kapacitor

+

You can update the container via:

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune
+
+

In words:

+
    +
  • docker-compose pull downloads any newer images;
  • +
  • docker-compose up -d causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and
  • +
  • the prune gets rid of the outdated images.
  • +
+

Kapacitor version pinning

+

If you need to pin to a particular version:

+
    +
  1. Use your favourite text editor to open docker-compose.yml.
  2. +
  3. +

    Find the line:

    +

    image: kapacitor:1.5

    +
  4. +
  5. +

    Replace 1.5 with the version you wish to pin to. For example, to pin to version 1.5.9:

    +

    image: kapacitor:1.5.9

    +

    Note:

    +
      +
    • Be cautious about using the latest tag. At the time of writing, there was no linux/arm/v7 architecture support.
    • +
    +
  6. +
  7. +

    Save the file and tell docker-compose to bring up the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d kapacitor
    +$ docker system prune
    +
    +
  8. +
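After the container comes up, you can confirm which version is actually running. This is a sketch only; it assumes the container is named kapacitor, as in the IOTstack service definition:

$ docker exec kapacitor kapacitor version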
\ No newline at end of file
diff --git a/Containers/MJPEG-Streamer/index.html b/Containers/MJPEG-Streamer/index.html
new file mode 100644
index 000000000..681eb77e3
--- /dev/null
+++ b/Containers/MJPEG-Streamer/index.html
@@ -0,0 +1,2753 @@
+ Motion JPEG Streamer - IOTstack

Motion JPEG Streamer

+

The mjpg-streamer container lets you pass a video stream from a local camera to a motioneye container. The mjpg-streamer and motioneye containers can be running on the same or different hosts.

+

Each mjpg-streamer container can process a stream from an official Raspberry Pi "ribbon cable" camera, or from a third-party USB-connected camera, such as those from Logitech.

+

Using mjpg-streamer to handle your video streams gives you a consistent approach to supporting multiple cameras and camera types. You do not need to care about distinctions between "ribbon" or USB cameras, nor which hosts are involved.

+

Raspberry Pi Ribbon Camera

+
+

This section is only relevant if you are trying to use a camera that connects to your Raspberry Pi via a ribbon cable.

+
+

Beginning with Raspberry Pi OS Bullseye, the Raspberry Pi Foundation introduced the LibCamera subsystem and withdrew support for the earlier raspistill and raspivid mechanisms which then became known as the legacy camera system.

+

The introduction of the LibCamera subsystem triggered quite a few articles (and videos) on the topic, of which this is one example:

+ +

Although the LibCamera subsystem works quite well with "native" applications, it has never been clear whether it supports passing camera streams to Docker containers. At the time of writing (2023-10-23), this author has never been able to find any examples which demonstrate that such support exists.

+

It is important to understand that:

+
    +
  1. This only applies to the Raspberry Pi Ribbon Camera;
  2. +
  3. In order to access a Raspberry Pi Ribbon Camera, the mjpg-streamer container depends on the legacy camera system; and
  4. +
  5. The LibCamera subsystem and the legacy camera system are mutually exclusive.
  6. +
+

In other words, if you want to use the mjpg-streamer container to process a stream from a Raspberry Pi Ribbon Camera, you have to forgo using the LibCamera subsystem.

+

preparing your Raspberry Pi

+

If you have a Raspberry Pi Ribbon Camera, prepare your system like this:

+
    +
  1. +

    Check the version of your system by running:

    +
    $ grep "VERSION_CODENAME" /etc/os-release
    +
    +

    The answer should be one of "buster", "bullseye" or "bookworm".

    +
  2. +
  3. +

    Configure camera support:

    +
      +
    • +

      if your system is running Buster, run this command:

      +
      $ sudo raspi-config nonint do_camera 0
      +
      +

      Buster pre-dates LibCamera so this is the same as enabling the legacy camera system. In this context, 0 means "enable" and 1 means "disable".

      +
    • +
    • +

      if your system is running Bullseye or Bookworm, run these commands:

      +
      $ sudo raspi-config nonint do_camera 1
      +$ sudo raspi-config nonint do_legacy 0
      +
      +

      The first command is protective and turns off the LibCamera subsystem, while the second command enables the legacy camera system.

      +
      +

      When executed from the command line, both the do_camera and do_legacy commands are supported in the Bookworm version of raspi-config. However, neither command is available when raspi-config is invoked as a GUI in a Bookworm system. This likely implies that the commands have been deprecated and will be removed, in which case this documentation will break.

      +
      +
    • +
    +
  4. +
  5. +

    Reboot your system:

    +
    $ sudo reboot
    +
    +
  6. +
  7. +

    Make a note that your ribbon camera will be accessible on /dev/video0.

    +
  8. +
+

Third-party cameras

+

The simplest approach is:

+
    +
  1. Connect your camera to a USB port.
  2. +
  3. +

    Run:

    +
    $ ls -l /dev/v4l/by-id
    +
    +

    This is an example of the response with a LogiTech "C920 PRO FHD Webcam 1080P" camera connected:

    +
    lrwxrwxrwx 1 root root 12 Oct 23 15:42 usb-046d_HD_Pro_Webcam_C920-video-index0 -> ../../video1
    +lrwxrwxrwx 1 root root 12 Oct 23 15:42 usb-046d_HD_Pro_Webcam_C920-video-index1 -> ../../video2
    +
    +

    In general, the device at index0 is where your camera will be accessible, as in:

    +
    /dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index0
    +
    +
  4. +
+

If you don't get a sensible response to the ls command then try disconnecting and reconnecting your camera, and rebooting your system.

+

Container variables

+

environment variables

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
variable | default | remark
MJPG_STREAMER_USERNAME | container ID | changes each time the container is recreated
MJPG_STREAMER_PASSWORD | random UUID | changes each time the container restarts
MJPG_STREAMER_SIZE | 640x480 | should be one of your camera's natural resolutions
MJPG_STREAMER_FPS | 5 | frames per second
+

device variable

+ + + + + + + + + + + + + + + +
variable | default | remark
MJPG_STREAMER_EXTERNAL_DEVICE | /dev/video0 | must be set to your video device
+

Setting your variables

+

To initialise your environment, begin by using a text editor (eg vim, nano) to edit ~/IOTstack/.env (which may or may not already exist):

+
    +
  1. +

    If your .env file does not already define your time-zone, take the opportunity to set it. For example:

    +
    TZ=Australia/Sydney
    +
    +
  2. +
  3. +

    The access credentials default to random values which change each time the container starts. This is reasonably secure but is unlikely to be useful in practice, so you need to invent some credentials of your own. Example:

    +
    MJPG_STREAMER_USERNAME=streamer
    +MJPG_STREAMER_PASSWORD=oNfDG-d1kgzC
    +
    +
  4. +
  5. +

    Define the external device path to your camera. Two examples have been given above:

    +
      +
    • +

      a ribbon camera:

      +
      MJPG_STREAMER_EXTERNAL_DEVICE=/dev/video0
      +
      +
    • +
    • +

      a Logitech C920 USB camera:

      +
      MJPG_STREAMER_EXTERNAL_DEVICE=/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index0
      +
      +
    • +
    +
  6. +
  7. +

    If you know your camera supports higher resolutions, you can also set the size. Examples:

    +
      +
    • +

      the ribbon camera can support:

      +
      MJPG_STREAMER_SIZE=1152x648
      +
      +
    • +
    • +

      the Logitech C920 can support:

      +
      MJPG_STREAMER_SIZE=1920x1080
      +
      +
    • +
    +
  8. +
  9. +

    If the mjpg-streamer and motioneye containers are going to be running on:

    +
      +
    • +

      the same host, you can consider increasing the frame rate:

      +
      MJPG_STREAMER_FPS=30
      +
      +

      Even though we are setting up a web camera, the traffic will never leave the host and will not traverse your Ethernet or WiFi networks.

      +
    • +
    • +

      different hosts, you should probably leave the rate at 5 frames per second until you understand the impact on network traffic.

      +
    • +
    +
  10. +
  11. +

    Save your work.

    +
  12. +
+

Tip:

+
    +
  • Do not use quote marks (either single or double quotes) to surround the values of your environment variables. This is because docker-compose treats the quotes as part of the string. If you used quotes, please go back and remove them.
  • +
+
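Putting the pieces together, the mjpg-streamer portion of ~/IOTstack/.env might end up looking something like this. The values are the illustrative ones used above (Logitech C920, Sydney time-zone); substitute your own:

TZ=Australia/Sydney
MJPG_STREAMER_USERNAME=streamer
MJPG_STREAMER_PASSWORD=oNfDG-d1kgzC
MJPG_STREAMER_EXTERNAL_DEVICE=/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index0
MJPG_STREAMER_SIZE=1920x1080
MJPG_STREAMER_FPS=5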

alternative approach

+

It is still a good idea to define TZ in your .env file. Most IOTstack containers now use the TZ=${TZ:-Etc/UTC} syntax so a single entry in your .env sets the timezone for all of your containers.

+

However, if you prefer to keep most of your environment variables inline in your docker-compose.yml rather than in .env, you can do that. Example:

+
environment:
+  - TZ=${TZ:-Etc/UTC}
+  - MJPG_STREAMER_USERNAME=streamer
+  - MJPG_STREAMER_PASSWORD=oNfDG-d1kgzC
+  - MJPG_STREAMER_SIZE=1152x648
+  - MJPG_STREAMER_FPS=5
+
+

Similarly for the camera device mapping:

+
devices:
+  - "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index:/dev/video0"
+
+

about variable substitution syntax

+

If you're wondering about the syntax used for environment variables:

+
  - MJPG_STREAMER_USERNAME=${MJPG_STREAMER_USERNAME:-}
+
+

it means that .env will be checked for the presence of MJPG_STREAMER_USERNAME=value. If the key is found, its value will be used. If the key is not found, the value will be set to a null string. Then, inside the container, a null string is used as the trigger to apply the defaults listed in the table above.

+

In the case of the camera device mapping, this syntax:

+
  - "${MJPG_STREAMER_EXTERNAL_DEVICE:-/dev/video0}:/dev/video0"
+
+

means that .env will be checked for the presence of MJPG_STREAMER_EXTERNAL_DEVICE=path. If the key is found, the path will be used. If the key is not found, the path will be set to /dev/video0 on the assumption that a camera is present and the device exists.

+

Regardless of whether a device path comes from .env, or is defined inline, or defaults to /dev/video0, if the device does not actually exist then docker-compose will refuse to start the container with the following error:

+
Error response from daemon: error gathering device information while adding custom device "«path»": no such file or directory
+
+

Starting the container

+
    +
  1. +

    Start the container like this:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d mjpg-streamer
    +
    +

    Doing this for the first time triggers a fairly long process. First, a basic operating system image is downloaded from DockerHub, then a Dockerfile is run to add the streamer software and construct a local image, after which the local image is instantiated as your running container. Subsequent launches use the local image so the container starts immediately. See also container maintenance.

    +
  2. +
  3. +

    Once the container is running, make sure it is behaving normally and has not gone into a restart loop:

    +
    $ docker ps -a --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}"
    +
    +
    +

    The docker ps command produces a lot of output which generally results in line-wrapping and can be hard to read. The --format argument reduces this clutter by focusing on the interesting columns. If you have IOTstackAliases installed, you can use DPS instead of copy/pasting the above command.

    +
    +

    If the container is restarting, you will see evidence of that in the STATUS column. If that happens, re-check the values set in the .env file and "up" the container again. The container's log (see below) may also be helpful.

    +
  4. +
  5. +

    Check the container's log:

    +
    $ docker logs mjpg-streamer
    + i: Using V4L2 device.: /dev/video0
    + i: Desired Resolution: 1152 x 648
    + i: Frames Per Second.: 5
    + i: Format............: JPEG
    + i: TV-Norm...........: DEFAULT
    + o: www-folder-path......: /usr/local/share/mjpg-streamer/www/
    + o: HTTP TCP port........: 80
    + o: HTTP Listen Address..: (null)
    + o: username:password....: streamer:oNfDG-d1kgzC
    + o: commands.............: enabled
    +
    +

    Many of the values you set earlier using environment variables show up here so viewing the log is a good way of making sure everything is being passed to the container.

    +

    Note:

    +
      +
    • The /dev/video0 in the first line of output is the internal device path (inside the container). This is not the same as the external device path associated with MJPG_STREAMER_EXTERNAL_DEVICE. The container doesn't know about the external device path so it has no way to display it.
    • +
    +
  6. +
+

Connecting the camera to MotionEye

+
    +
  1. Use a browser to connect with MotionEye on port 8765.
  2. +
  3. Authenticate as an administrator (the default is "admin" with no password).
  4. +
  5. Click the ☰ icon at the top, left of the screen so that it rotates 90° and exposes the "Camera" popup menu.
  6. +
  7. In the "Camera" popup menu field, click the ▾ and choose "Add Camera…".
  8. +
  9. Change the "Camera Type" field to "Network Camera".
  10. +
  11. +

    If the motioneye and mjpg-streamer containers are running on:

    +
      +
    • +

      the same host, the URL should be:

      +
      http://mjpg-streamer:80/?action=stream
      +
      +

      Here:

      +
        +
      • mjpg-streamer is the name of the container. Technically, it is a host name (rather than a domain name); and
      • +
      • +

        port 80 is the internal port that the streamer process running inside the container is listening to. It comes from the right hand side of the port mapping in the service definition:

        +
        ports:
        +- "8980:80"
        +
        +
      • +
      +
    • +
    • +

      different hosts, the URL should be in this form:

      +
      http://«name-or-ip»:8980/?action=stream
      +
      +

      Here:

      +
        +
      • +

        «name-or-ip» is the domain name or IP address of the host on which the mjpg-streamer container is running. Examples:

        +
        http://raspberrypi.local:8980/?action=stream
        +http://my-spy.domain.com:8980/?action=stream
        +http://192.168.200.200:8980/?action=stream
        +
        +
      • +
      • +

        port 8980 is the external port that the host where the mjpg-streamer container is running is listening on behalf of the container. It comes from the left hand side of the port mapping in the service definition:

        +
        ports:
        +- "8980:80"
        +
        +
      • +
      +
    • +
    +
  12. +
  13. +

    Enter the Username ("streamer" in this example).

    +
  14. +
  15. Enter the Password ("oNfDG-d1kgzC" in this example).
  16. +
  17. Click in the Username field again. This causes MotionEye to retry the connection, after which the camera should appear in the Camera field.
  18. +
  19. Click OK. The camera feed should start working.
  20. +
+

Container maintenance

+

Because it is built from a local Dockerfile, the mjpg-streamer container does not get updated in response to a normal "pull". If you want to rebuild the container, proceed like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull mjpg-streamer
+$ docker-compose up -d mjpg-streamer
+$ docker system prune -f
+
+

If you have IOTstackAliases installed, the above is:

+
$ REBUILD mjpg-streamer
+$ UP mjpg-streamer
+$ PRUNE
+
\ No newline at end of file
diff --git a/Containers/MariaDB/index.html b/Containers/MariaDB/index.html
new file mode 100644
index 000000000..fc85e6a9c
--- /dev/null
+++ b/Containers/MariaDB/index.html
@@ -0,0 +1,2525 @@
+ MariaDB - IOTstack

MariaDB

+

Source

+ +

About

+

MariaDB is a fork of MySQL. This is an unofficial image provided by linuxserver.io because there is no official image for arm.

+

Connecting to the DB

+

The port is 3306. It exists inside the Docker network, so other containers can connect via mariadb:3306. For external connections, use <your Pi's IP>:3306 (see the example below).
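For example, from another machine on your network that has the MariaDB (or MySQL) command-line client installed, and assuming your Pi's IP address is 192.168.1.10 (substitute your own) and you kept the default user name from the service definition:

$ mysql -h 192.168.1.10 -P 3306 -u mariadbuser -p

You will be prompted for the password you set via MYSQL_PASSWORD.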

+

image

+

Setup

+

Before starting the stack, edit the docker-compose.yml file and check your environment variables. In particular:

+
  environment:
+    - TZ=Etc/UTC
+    - MYSQL_ROOT_PASSWORD=
+    - MYSQL_DATABASE=default
+    - MYSQL_USER=mariadbuser
+    - MYSQL_PASSWORD=
+
+

If you are running old-menu, you will have to set both passwords. Under new-menu, the menu may have allocated random passwords for you but you can change them if you like.

+

You only get the opportunity to change the MYSQL_ prefixed environment variables before you bring up the container for the first time. If you decide to change these values after initialisation, you will either have to:

+
    +
  1. +

    Erase the persistent storage area and start again. There are three steps:

    +
      +
    • +

      Stop the container and remove the persistent storage area:

      +
      $ cd ~/IOTstack
      +$ docker-compose down mariadb
      +$ sudo rm -rf ./volumes/mariadb
      +
      +
      +

      see also if downing a container doesn't work

      +
      +
    • +
    • +

      Edit docker-compose.yml and change the variables.

      +
    • +
    • +

      Bring up the container:

      +
      $ docker-compose up -d mariadb
      +
      +
    • +
    +
  2. +
  3. +

    Open a terminal window within the container (see below) and change the values by hand.

    +
    +

    The how-to is beyond the scope of this documentation. Google is your friend!

    +
    +
  4. +
+

Terminal

+

You can open a terminal session within the mariadb container via:

+
$ docker exec -it mariadb bash
+
+

To connect to the database: mysql -uroot -p
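If you just want a quick sanity check without opening an interactive session, you can run a single statement from the host instead. This prompts for the root password set via MYSQL_ROOT_PASSWORD:

$ docker exec -it mariadb mysql -uroot -p -e "SHOW DATABASES;"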

+

To close the terminal session, either:

+
    +
  • type "exit" and press return; or
  • +
  • press control+d.
  • +
+

Container health check

+

theory of operation

+

A script, or "agent", to assess the health of the MariaDB container has been added to the local image via the Dockerfile. In other words, the script is specific to IOTstack.

+

The agent is invoked 30 seconds after the container starts, and every 30 seconds thereafter. The agent:

+
    +
  1. +

    Runs the command:

    +
    mysqladmin ping -h localhost
    +
    +
  2. +
  3. +

    If that command succeeds, the agent compares the response returned by the command with the expected response:

    +
    mysqld is alive
    +
    +
  4. +
  5. +

    If the command returned the expected response, the agent tests the responsiveness of the TCP port the mysqld daemon should be listening on (see customising health-check).

    +
  6. +
  7. +

    If all of those steps succeed, the agent concludes that MariaDB is functioning properly and returns "healthy".

    +
  8. +
+
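In addition to the methods described in the next section, you can ask Docker directly for the agent's most recent verdict. This is standard Docker behaviour rather than anything IOTstack-specific:

$ docker inspect --format "{{.State.Health.Status}}" mariadb

The reply will be one of starting, healthy or unhealthy.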

monitoring health-check

+

Portainer's Containers display contains a Status column which shows health-check results for all containers that support the feature.

+

You can also use the docker ps command to monitor health-check results. The following command narrows the focus to mariadb:

+
$ docker ps --format "table {{.Names}}\t{{.Status}}"  --filter name=mariadb
+
+

Possible reply patterns are:

+
    +
  1. +

    The container is starting and has not yet run the health-check agent:

    +
    NAMES     STATUS
    +mariadb   Up 5 seconds (health: starting)
    +
    +
  2. +
  3. +

    The container has been running for at least 30 seconds and the health-check agent has returned a positive result within the last 30 seconds:

    +
    NAMES     STATUS
    +mariadb   Up 33 seconds (healthy)
    +
    +
  4. +
  5. +

    The container has been running for more than 90 seconds but has failed the last three successive health-check tests:

    +
    NAMES     STATUS
    +mariadb   Up About a minute (unhealthy)
    +
    +
  6. +
+

customising health-check

+

You can customise the operation of the health-check agent by editing the mariadb service definition in your Compose file:

+
    +
  1. +

    By default, the mysqld daemon listens to internal port 3306. If you need to change that port, you also need to inform the health-check agent via an environment variable. For example, suppose you changed the internal port to 12345:

    +
        environment:
    +      - MYSQL_TCP_PORT=12345
    +
    +

    Notes:

    +
      +
    • The MYSQL_TCP_PORT variable is defined by MariaDB, not IOTstack, so changing this variable affects more than just the health-check agent.
    • +
    • +

      If you are running "old menu", this change should be made in the file:

      +
      ~/IOTstack/services/mariadb/mariadb.env
      +
      +
    • +
    +
  2. +
  3. +

    The mysqladmin ping command relies on the root password supplied via the MYSQL_ROOT_PASSWORD environment variable in the Compose file. The command will not succeed if the root password is not correct, and the agent will return "unhealthy".

    +
  4. +
  5. +

    If the health-check agent misbehaves in your environment, or if you simply don't want it to be active, you can disable all health-checking for the container by adding the following lines to its service definition:

    +
        healthcheck:
    +      disable: true
    +
    +

    Note:

    +
      +
    • +

      The mere presence of a healthcheck: clause in the mariadb service definition overrides the supplied agent. In other words, the following can't be used to re-enable the supplied agent:

      +
          healthcheck:
      +      disable: false
      +
      +

      You must remove the entire healthcheck: clause.

      +
    • +
    +
  6. +
+

Keeping MariaDB up-to-date

+

To update the mariadb container:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull mariadb
+$ docker-compose up -d mariadb
+$ docker system prune
+$ docker system prune
+
+

The first "prune" removes the old local image, the second removes the old base image.

\ No newline at end of file
diff --git a/Containers/Mosquitto/index.html b/Containers/Mosquitto/index.html
new file mode 100644
index 000000000..c4dbbd797
--- /dev/null
+++ b/Containers/Mosquitto/index.html
@@ -0,0 +1,3445 @@
+ Mosquitto - IOTstack

Mosquitto

+

This document discusses an IOTstack-specific version of Mosquitto built on top of Eclipse/Mosquitto using a Dockerfile.

+
+

If you want the documentation for the original implementation of Mosquitto (just "as it comes" from DockerHub) please see Mosquitto.md on the old-menu branch.

+
+
+ +

References

+ +

Significant directories and files

+
~/IOTstack
+├── .templates
+│   └── mosquitto
+│       ├── service.yml ❶
+│       ├── Dockerfile ❷
+│       ├── docker-entrypoint.sh ❸
+│       └── iotstack_defaults ❹
+│           ├── config
+│           │   ├── filter.acl
+│           │   └── mosquitto.conf
+│           └── pwfile
+│               └── pwfile
+├── services
+│   └── mosquitto
+│       └── service.yml ❺
+├── docker-compose.yml ❻
+└── volumes
+    └── mosquitto ❼
+        ├── config
+        │   ├── filter.acl 
+        │   └── mosquitto.conf
+        ├── data
+        │   └── mosquitto.db
+        ├── log
+        └── pwfile 
+            └── pwfile
+
+
    +
  1. The template service definition.
  2. +
  3. The Dockerfile used to customise Mosquitto for IOTstack.
  4. +
  5. A replacement for the Eclipse-Mosquitto script of the same name, extended to handle container self-repair.
  6. +
  7. A standard set of defaults for IOTstack (used to initialise defaults on first run, and for container self-repair).
  8. +
  9. The working service definition (only relevant to old-menu, copied from ❶).
  10. +
  11. The Compose file (includes ❶).
  12. +
  13. +

    The persistent storage area:

    +
      +
    • Directories and files in ❼ are owned by userID 1883. This is enforced each time Mosquitto starts.
    • +
    • You will normally need sudo to make changes in this area.
    • +
    • Each time Mosquitto starts, it automatically replaces anything originating in ❹ that has gone missing from ❼. This "self-repair" function is intended to provide reasonable assurance that Mosquitto will at least start instead of going into a restart loop.
    • +
    +
  14. +
+

How Mosquitto gets built for IOTstack

+

Mosquitto source code (GitHub)

+

The source code for Mosquitto lives at GitHub eclipse/mosquitto.

+

Mosquitto images (DockerHub)

+

Periodically, the source code is recompiled and the resulting image is pushed to eclipse-mosquitto on DockerHub.

+

IOTstack menu

+

When you select Mosquitto in the IOTstack menu, the template service definition is copied into the Compose file.

+
+

Under old menu, it is also copied to the working service definition and then not really used.

+
+

IOTstack first run

+

On a first install of IOTstack, you run the menu, choose Mosquitto as one of your containers, and are told to do this:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+
+

See also the Migration considerations (below).

+
+

docker-compose reads the Compose file. When it arrives at the mosquitto fragment, it finds:

+
  mosquitto:
+    container_name: mosquitto
+    build:
+      context: ./.templates/mosquitto/.
+      args:
+      - MOSQUITTO_BASE=eclipse-mosquitto:latest
+    
+
+

Note:

+
    +
  • +

    Earlier versions of the Mosquitto service definition looked like this:

    +
      mosquitto:
    +    container_name: mosquitto
    +    build: ./.templates/mosquitto/.
    +    
    +
    +

    The single-line build produces exactly the same result as the four-line build, save that the single-line form does not support pinning Mosquitto to a specific version (see the sketch below for how the four-line form supports pinning).

    +
  • +
+
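As a sketch of how pinning works with the four-line form: change the build argument to name a specific tag. The tag 2.0.18 below is only an illustration; substitute whichever eclipse-mosquitto tag you actually need:

      - MOSQUITTO_BASE=eclipse-mosquitto:2.0.18

Then rebuild the local image and recreate the container:

$ cd ~/IOTstack
$ docker-compose up -d --build mosquitto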

The ./.templates/mosquitto/. path associated with the build tells docker-compose to look for:

+
~/IOTstack/.templates/mosquitto/Dockerfile
+
+
+

The Dockerfile is in the .templates directory because it is intended to be a common build for all IOTstack users. This is different to the arrangement for Node-RED where the Dockerfile is in the services directory because it is how each individual IOTstack user's version of Node-RED is customised.

+
+

The Dockerfile begins with:

+
ARG MOSQUITTO_BASE=eclipse-mosquitto:latest
+FROM $MOSQUITTO_BASE
+
+

The FROM statement tells the build process to pull down the base image from DockerHub.

+
+

It is a base image in the sense that it never actually runs as a container on your Raspberry Pi.

+
+

The remaining instructions in the Dockerfile customise the base image to produce a local image. The customisations are:

+
    +
  1. +

    Add the rsync and tzdata packages.

    +
      +
    • rsync helps the container perform self-repair; while
    • +
    • tzdata enables Mosquitto to respect the "TZ" environment variable.
    • +
    +
  2. +
  3. +

    Add a standard set of configuration defaults appropriate for IOTstack.

    +
  4. +
  5. +

    Replace docker-entrypoint.sh with a version which:

    +
      +
    • Calls rsync to perform self-repair if configuration files go missing; and
    • +
    • Enforces 1883:1883 ownership in ~/IOTstack/volumes/mosquitto.
    • +
    +
  6. +
+

The local image is instantiated to become your running container.

+

When you run the docker images command after Mosquitto has been built, you may see two rows for Mosquitto:

+
$ docker images
+REPOSITORY                      TAG         IMAGE ID       CREATED        SIZE
+iotstack_mosquitto              latest      cf0bfe1a34d6   4 weeks ago    11.6MB
+eclipse-mosquitto               latest      46ad1893f049   4 weeks ago    8.31MB
+
+
    +
  • eclipse-mosquitto is the base image; and
  • +
  • iotstack_mosquitto is the local image.
  • +
+

You may see the same pattern in Portainer, which reports the base image as "unused". You should not remove the base image, even though it appears to be unused.

+
+

Whether you see one or two rows depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
+

Migration considerations

+

Under the original IOTstack implementation of Mosquitto (just "as it comes" from DockerHub), the service definition expected the configuration files to be at:

+
~/IOTstack/services/mosquitto/mosquitto.conf
+~/IOTstack/services/mosquitto/filter.acl
+
+

Under this implementation of Mosquitto, the configuration files have moved to:

+
~/IOTstack/volumes/mosquitto/config/mosquitto.conf
+~/IOTstack/volumes/mosquitto/config/filter.acl
+
+
+

The change of location is one of the things that allows self-repair to work properly.

+
+

The default versions of each configuration file are the same. Only the locations have changed. If you did not alter either file when you were running the original IOTstack implementation of Mosquitto, there will be no change in Mosquitto's behaviour when it is built from a Dockerfile.

+

However, if you did alter either or both configuration files, then you should compare the old and new versions and decide whether you wish to retain your old settings. For example:

+
$ cd ~/IOTstack
+$ diff ./services/mosquitto/mosquitto.conf ./volumes/mosquitto/config/mosquitto.conf 
+
+
+

You can also use the -y option on the diff command to see a side-by-side comparison of the two files.

+
+
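
For example:

+
$ cd ~/IOTstack
+$ diff -y ./services/mosquitto/mosquitto.conf ./volumes/mosquitto/config/mosquitto.conf
+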

Using mosquitto.conf as the example, assume you wish to use your existing file instead of the default:

+
    +
  1. +

    To move your existing file into the new location:

    +
    $ cd ~/IOTstack
    +$ sudo mv ./services/mosquitto/mosquitto.conf ./volumes/mosquitto/config/mosquitto.conf
    +
    +
    +

    The move overwrites the default. At this point, the moved file will probably be owned by user "pi" but that does not matter.

    +
    +
  2. +
  3. +

    Mosquitto will always enforce correct ownership (1883:1883) on any restart but it will not overwrite permissions. If in doubt, use mode 644 as your default for permissions:

    +
    $ sudo chmod 644 ./volumes/mosquitto/config/mosquitto.conf
    +
    +
  4. +
  5. +

    Restart Mosquitto:

    +
    $ docker-compose restart mosquitto
    +
    +
  6. +
  7. +

    Check your work:

    +
    $ ls -l ./volumes/mosquitto/config/mosquitto.conf
    +-rw-r--r-- 1 1883 1883 ssss mmm dd hh:mm ./volumes/mosquitto/config/mosquitto.conf
    +
    +
  8. +
  9. +

    If necessary, repeat these steps with filter.acl.

    +
  10. +
+

Logging

+

Mosquitto logging is controlled by mosquitto.conf. This is the default configuration:

+
#log_dest file /mosquitto/log/mosquitto.log
+log_dest stdout
+log_timestamp_format %Y-%m-%dT%H:%M:%S
+# Reduce size and SD-card flash wear, safe to remove if using a SSD
+connection_messages false
+
+

When log_dest is set to stdout, you inspect Mosquitto's logs like this:

+
$ docker logs mosquitto
+
+

Logs written to stdout are stored and persisted to disk as managed by Docker. They are kept over reboots, but are lost when your Mosquitto container is removed or updated.

+

The alternative, which may be more appropriate if you are running on an SSD or HD, is to change mosquitto.conf to be like this:

+
log_dest file /mosquitto/log/mosquitto.log
+#log_dest stdout
+log_timestamp_format %Y-%m-%dT%H:%M:%S
+
+

and then restart Mosquitto:

+
$ cd ~/IOTstack
+$ docker-compose restart mosquitto
+
+

The path /mosquitto/log/mosquitto.log is an internal path. When this style of logging is active, you inspect Mosquitto's logs using the external path like this:

+
$ sudo tail ~/IOTstack/volumes/mosquitto/log/mosquitto.log
+
+
+

You need to use sudo because the log is owned by userID 1883 and Mosquitto creates it without "world" read permission.

+
+

Logs written to mosquitto.log persist until you take action to prune the file.

+
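
If the file grows too large, one way to prune it (a sketch; adjust the path if you have customised your volume mappings) is to truncate it in place and then restart Mosquitto so the broker re-opens the file:

+
$ cd ~/IOTstack
+$ sudo truncate -s 0 ./volumes/mosquitto/log/mosquitto.log
+$ docker-compose restart mosquitto
+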

Security

+

Configuring security

+

Mosquitto security is controlled by mosquitto.conf. These are the relevant directives:

+
#password_file /mosquitto/pwfile/pwfile
+allow_anonymous true
+
+

Mosquitto security can be in four different states, which are summarised in the following table:

+
| password_file | allow_anonymous | security enforcement | remark            |
|---------------|-----------------|----------------------|-------------------|
| disabled      | true            | open access          | default           |
| disabled      | false           | all access denied    | not really useful |
| enabled       | true            | credentials optional |                   |
| enabled       | false           | credentials required |                   |
+

Password file management

+

The password file for Mosquitto is part of a mapped volume:

+
    +
  • The internal path is /mosquitto/pwfile/pwfile
  • +
  • The external path is ~/IOTstack/volumes/mosquitto/pwfile/pwfile
  • +
+

A common problem with the previous version of Mosquitto for IOTstack occurred when the password_file directive was enabled but the pwfile was not present. Mosquitto went into a restart loop.

+

The Mosquitto container performs self-repair each time the container is brought up or restarts. If pwfile is missing, an empty file is created as a placeholder. This prevents the restart loop. What happens next depends on allow_anonymous:

+
    +
  • +

    If true then:

    +
      +
    • Any MQTT request without credentials will be permitted;
    • +
    • Any MQTT request with credentials will be rejected (because pwfile is empty so there is nothing to match on).
    • +
    +
  • +
  • +

    If false then all MQTT requests will be rejected.

    +
  • +
+

create username and password

+

To create a username and password, use the following as a template.

+
$ docker exec mosquitto mosquitto_passwd -b /mosquitto/pwfile/pwfile «username» «password» 
+
+

Replace «username» and «password» with appropriate values, then execute the command. For example, to create the username "hello" with password "world":

+
$ docker exec mosquitto mosquitto_passwd -b /mosquitto/pwfile/pwfile hello world
+
+

Note:

+
    +
  • See also customising health-check. If you are creating usernames and passwords, you may also want to create credentials for the health-check agent.
  • +
+

check password file

+

There are two ways to verify that the password file exists and has the expected content:

+
    +
  1. +

    View the file using its external path:

    +
    $ sudo cat ~/IOTstack/volumes/mosquitto/pwfile/pwfile 
    +
    +
    +

    sudo is needed because the file is neither owned nor readable by pi.

    +
    +
  2. +
  3. +

    View the file using its internal path:

    +
    $ docker exec mosquitto cat /mosquitto/pwfile/pwfile
    +
    +
  4. +
+

Each credential starts with the username and occupies one line in the file:

+
hello:$7$101$ZFOHHVJLp2bcgX+h$MdHsc4rfOAhmGG+65NpIEJkxY0beNeFUyfjNAGx1ILDmI498o4cVOaD9vDmXqlGUH9g6AgHki8RPDEgjWZMkDA==
+
+

remove entry from password file

+

To remove an entry from the password file:

+
$ docker exec mosquitto mosquitto_passwd -D /mosquitto/pwfile/pwfile «username»
+
+

reset the password file

+

There are several ways to reset the password file. Your options are:

+
    +
  1. +

    Remove the password file and restart Mosquitto:

    +
    $ cd ~/IOTstack
    +$ sudo rm ./volumes/mosquitto/pwfile/pwfile
    +$ docker-compose restart mosquitto 
    +
    +

    The result is an empty password file.

    +
  2. +
  3. +

    Clear all existing passwords while adding a new password:

    +
    $ docker exec mosquitto mosquitto_passwd -c -b /mosquitto/pwfile/pwfile «username» «password»
    +
    +

    The result is a password file with a single entry.

    +
  4. +
  5. +

    Clear all existing passwords in favour of a single dummy password which is then removed:

    +
    $ docker exec mosquitto mosquitto_passwd -c -b /mosquitto/pwfile/pwfile dummy dummy
    +$ docker exec mosquitto mosquitto_passwd -D /mosquitto/pwfile/pwfile dummy
    +
    +

    The result is an empty password file.

    +
  6. +
+

Activate Mosquitto security

+
    +
  1. +

    Use sudo and your favourite text editor to open the following file:

    +
    ~/IOTstack/volumes/mosquitto/config/mosquitto.conf
    +
    +
  2. +
  3. +

    Remove the comment indicator from the following line:

    +
    #password_file /mosquitto/pwfile/pwfile
    +
    +

    so that it becomes:

    +
    password_file /mosquitto/pwfile/pwfile
    +
    +
  4. +
  5. +

    Set allow_anonymous as required:

    +
    allow_anonymous true
    +
    +

    If true then:

    +
      +
    • Any MQTT request without credentials will be permitted;
    • +
    • The validity of credentials supplied with any MQTT request will be enforced.
    • +
    +

    If false then:

    +
      +
    • Any MQTT request without credentials will be rejected;
    • +
    • The validity of credentials supplied with any MQTT request will be enforced.
    • +
    +
  6. +
  7. +

    Save the modified configuration file and restart Mosquitto:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart mosquitto
    +
    +
  8. +
+

Testing Mosquitto security

+

assumptions

+
    +
  1. You have created at least one username ("hello") and password ("world").
  2. +
  3. password_file is enabled.
  4. +
  5. allow_anonymous is false.
  6. +
+

install testing tools

+

If you do not have the Mosquitto clients installed on your Raspberry Pi (ie $ which mosquitto_pub does not return a path), install them using:

+
$ sudo apt install -y mosquitto-clients
+
+

test: anonymous access is prohibited

+

Test without providing credentials:

+
$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away"
+Connection Refused: not authorised.
+Error: The connection was refused.
+
+

Note:

+
    +
  • The error is the expected result and shows that Mosquitto will not allow anonymous access.
  • +
+

test: access with credentials is permitted

+

Test with credentials

+
$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away" -u hello -P world
+$ 
+
+

Note:

+
    +
  • The absence of any error message means the message was sent. Silence = success!
  • +
+

test: round-trip with credentials is permitted

+

Prove round-trip connectivity will succeed when credentials are provided. First, set up a subscriber as a background process. This mimics the role of a process like Node-RED:

+
$ mosquitto_sub -v -h 127.0.0.1 -p 1883 -t "/password/test" -F "%I %t %p" -u hello -P world &
+[1] 25996
+
+

Repeat the earlier test:

+
$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away" -u hello -P world
+2021-02-16T14:40:51+1100 /password/test up up and away
+
+

Note:

+
    +
  • the second line above is coming from the mosquitto_sub running in the background.
  • +
+

When you have finished testing you can kill the background process (press return twice after you enter the kill command):

+
$ kill %1
+$
+[1]+  Terminated              mosquitto_sub -v -h 127.0.0.1 -p 1883 -t "/password/test" -F "%I %t %p" -u hello -P world
+
+

Container health check

+

theory of operation

+

A script, or "agent", to assess the health of the Mosquitto container has been added to the local image via the Dockerfile. In other words, the script is specific to IOTstack.

+

The agent is invoked 30 seconds after the container starts, and every 30 seconds thereafter. The agent:

+
    +
  • +

    Publishes a retained MQTT message to the broker running in the same container. The message payload is the current date and time, and the default topic string is:

    +
    iotstack/mosquitto/healthcheck
    +
    +
  • +
  • +

    Subscribes to the same broker for the same topic for a single message event.

    +
  • +
  • Compares the payload sent with the payload received. If the payloads (ie time-stamps) match, the agent concludes that the Mosquitto broker (the process running inside the same container) is functioning properly for round-trip messaging (a simplified sketch of this round trip appears below).
  • +
+
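
The following is a minimal sketch of that round trip, using the Mosquitto command-line clients that ship inside the container. It illustrates the idea only (it is not the actual agent baked into the IOTstack image) and assumes the default port and topic with authentication disabled; add -u and -P options if you have enabled credentials:

+
$ STAMP=$(date)
+$ docker exec mosquitto mosquitto_pub -r -t "iotstack/mosquitto/healthcheck" -m "$STAMP"
+$ docker exec mosquitto mosquitto_sub -C 1 -t "iotstack/mosquitto/healthcheck"
+

If the payload printed by mosquitto_sub matches the value of $STAMP, the broker is passing round-trip messages correctly.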

monitoring health-check

+

Portainer's Containers display contains a Status column which shows health-check results for all containers that support the feature.

+

You can also use the docker ps command to monitor health-check results. The following command narrows the focus to mosquitto:

+
$ docker ps --format "table {{.Names}}\t{{.Status}}"  --filter name=mosquitto
+
+

Possible reply patterns are:

+
    +
  1. +

    The container is starting and has not yet run the health-check agent:

    +
    NAMES       STATUS
    +mosquitto   Up 3 seconds (health: starting)
    +
    +
  2. +
  3. +

    The container has been running for at least 30 seconds and the health-check agent has returned a positive result within the last 30 seconds:

    +
    NAMES       STATUS
    +mosquitto   Up 34 seconds (healthy)
    +
    +
  4. +
  5. +

    The container has been running for more than 90 seconds but has failed the last three successive health-check tests:

    +
    NAMES       STATUS
    +mosquitto   Up About a minute (unhealthy)
    +
    +
  6. +
+

You can also subscribe to the same topic that the health-check agent is using to view the retained messages as they are published:

+
$ mosquitto_sub -v -h localhost -p 1883 -t "iotstack/mosquitto/healthcheck" -F "%I %t %p"
+
+

Notes:

+
    +
  • This assumes you are running the command outside container-space on the same host as your Mosquitto container. If you run this command from another host, replace localhost with the IP address or domain name of the host where your Mosquitto container is running.
  • +
  • The -p 1883 is the external port. You will need to adjust this if you are using a different external port for your MQTT service.
  • +
  • If you enable authentication for your Mosquitto broker, you will need to add -u «user» and -P «password» parameters to this command.
  • +
  • You should expect to see a new message appear approximately every 30 seconds. That indicates the health-check agent is functioning normally. Use control+c to terminate the command.
  • +
+

customising health-check

+

You can customise the operation of the health-check agent by editing the mosquitto service definition in your Compose file:

+
    +
  1. +

    By default, the mosquitto broker listens to internal port 1883. If you need to change that port, you also need to inform the health-check agent via an environment variable. For example, suppose you changed the internal port to 12345:

    +
        environment:
    +      - HEALTHCHECK_PORT=12345
    +
    +
  2. +
  3. +

    If the default topic string used by the health-check agent causes a name-space collision, you can override it. For example, you could use a Universally-Unique Identifier (UUID):

    +
        environment:
    +      - HEALTHCHECK_TOPIC=4DAA361F-288C-45D5-9540-F1275BDCAF02
    +
    +

    Note:

    + +
  4. +
  5. +

    If you have enabled authentication for your Mosquitto broker service, you will need to provide appropriate credentials for your health-check agent:

    +
        environment:
    +      - HEALTHCHECK_USER=healthyUser
    +      - HEALTHCHECK_PASSWORD=healthyUserPassword
    +
    +
  6. +
  7. +

    If the health-check agent misbehaves in your environment, or if you simply don't want it to be active, you can disable all health-checking for the container by adding the following lines to its service definition:

    +
        healthcheck:
    +      disable: true
    +
    +

    Notes:

    +
      +
    • The directives to disable health-checking are independent of the environment variables. If you want to disable health-checking temporarily, there is no need to remove any HEALTHCHECK_ environment variables that may already be in place.
    • +
    • +

      Conversely, the mere presence of a healthcheck: clause in the mosquitto service definition overrides the supplied agent. In other words, the following can't be used to re-enable the supplied agent:

      +
          healthcheck:
      +      disable: false
      +
      +

      You must remove the entire healthcheck: clause.

      +
    • +
    +
  8. +
+

Upgrading Mosquitto

+

You can update most containers like this:

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune
+
+

In words:

+
    +
  • docker-compose pull downloads any newer images;
  • +
  • docker-compose up -d causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and
  • +
  • the prune gets rid of the outdated images.
  • +
+

This strategy doesn't work when a Dockerfile is used to build a local image on top of a base image downloaded from DockerHub. The local image is what is running so there is no way for the pull to sense when a newer version becomes available.

+

The only way to know when an update to Mosquitto is available is to check the eclipse-mosquitto tags page on DockerHub.

+
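
You can confirm which base image reference your current local image was built from by querying its metadata (the same label query is shown under version pinning below; it assumes jq is installed):

+
$ docker image inspect iotstack_mosquitto:latest | jq .[0].Config.Labels
+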

Once a new version appears on DockerHub, you can upgrade Mosquitto like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull mosquitto
+$ docker-compose up -d mosquitto
+$ docker system prune
+$ docker system prune
+
+

Breaking it down into parts:

+
    +
  • build causes the named container to be rebuilt;
  • +
  • --no-cache tells the Dockerfile process that it must not take any shortcuts. It really must rebuild the local image;
  • +
  • --pull tells the Dockerfile process to actually check with DockerHub to see if there is a later version of the base image and, if so, to download it before starting the build;
  • +
  • mosquitto is the named container argument required by the build command.
  • +
+

Your existing Mosquitto container continues to run while the rebuild proceeds. Once the freshly-built local image is ready, the up tells docker-compose to do a new-for-old swap. There is barely any downtime for your MQTT broker service.

+

The prune is the simplest way of cleaning up. The first call removes the old local image. The second call cleans up the old base image. Whether an old base image exists depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+

Mosquitto version pinning

+

If an update to Mosquitto introduces a breaking change, you can revert to an earlier known-good version by pinning to that version. Here's how:

+
    +
  1. +

    Use your favourite text editor to open:

    +
    ~/IOTstack/docker-compose.yml
    +
    +
  2. +
  3. +

    Find the Mosquitto service definition. If your service definition contains this line:

    +
    build: ./.templates/mosquitto/.
    +
    +

    then replace that line with the following four lines:

    +
    build:
    +  context: ./.templates/mosquitto/.
    +  args:
    +    - MOSQUITTO_BASE=eclipse-mosquitto:latest
    +
    +

    Notes:

    +
      +
    • The four-line form of the build directive is now the default for Mosquitto so those lines may already be present in your compose file.
    • +
    • Remember to use spaces, not tabs, when editing compose files.
    • +
    +
  4. +
  5. +

    Replace latest with the version you wish to pin to. For example, to pin to version 2.0.13:

    +
        - MOSQUITTO_BASE=eclipse-mosquitto:2.0.13
    +
    +
  6. +
  7. +

    Save the file and tell docker-compose to rebuild the local image:

    +
    $ cd ~/IOTstack
    +$ docker-compose build --no-cache --pull mosquitto
    +$ docker-compose up -d mosquitto
    +$ docker system prune
    +
    +

    The new local image is built, then the new container is instantiated based on that image. The prune deletes the old local image.

    +
  8. +
  9. +

    Images built in this way will always be tagged with "latest", as in:

    +
    $ docker images iotstack_mosquitto
    +REPOSITORY           TAG       IMAGE ID       CREATED              SIZE
    +iotstack_mosquitto   latest    8c0543149b9b   About a minute ago   16.2MB
    +
    +

    You may find it useful to assign an explicit tag to help you remember the version number used for the build. For example:

    +
    $ docker tag iotstack_mosquitto:latest iotstack_mosquitto:2.0.13
    +$ docker images iotstack_mosquitto
    +REPOSITORY           TAG       IMAGE ID       CREATED              SIZE
    +iotstack_mosquitto   2.0.13    8c0543149b9b   About a minute ago   16.2MB
    +iotstack_mosquitto   latest    8c0543149b9b   About a minute ago   16.2MB
    +
    +

    You can also query the image metadata to discover version information:

    +
    $ docker image inspect iotstack_mosquitto:latest | jq .[0].Config.Labels
    +{
    +  "com.github.SensorsIot.IOTstack.Dockerfile.based-on": "https://github.com/eclipse/mosquitto",
    +  "com.github.SensorsIot.IOTstack.Dockerfile.build-args": "eclipse-mosquitto:2.0.13",
    +  "description": "Eclipse Mosquitto MQTT Broker",
    +  "maintainer": "Roger Light <roger@atchoo.org>"
    +}
    +
    +
  10. +
+

About Port 9001

+

Earlier versions of the IOTstack service definition for Mosquitto included two port mappings:

+
ports:
+  - "1883:1883"
+  - "9001:9001"
+
+

Issue 67 explored the topic of port 9001 and showed that:

+
    +
  • The base image for Mosquitto did not expose port 9001; and
  • +
  • The running container was not listening to port 9001.
  • +
+

On that basis, the mapping for port 9001 was removed from service.yml.

+

If you have a use-case that needs port 9001, you can re-enable support by:

+
    +
  1. +

    Inserting the port mapping under the mosquitto definition in docker-compose.yml:

    +
    - "9001:9001"
    +
    +
  2. +
  3. +

    Inserting the additional listener in mosquitto.conf:

    +
    listener 1883
    +listener 9001
    +
    +

    You need both lines. If you omit 1883 then Mosquitto will stop listening to port 1883 and will only listen to port 9001.

    +
  4. +
  5. +

    Restarting the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart mosquitto
    +
    +
  6. +
+

Please consider raising an issue to document your use-case. If you think your use-case has general application then please also consider creating a pull request to make the changes permanent.

\ No newline at end of file diff --git a/Containers/MotionEye/index.html b/Containers/MotionEye/index.html new file mode 100644 index 000000000..3278562ee --- /dev/null +++ b/Containers/MotionEye/index.html @@ -0,0 +1,2445 @@

MotionEye

+

About

+

MotionEye is a web frontend for the Motion project.

+

References

+ +

Service Definition

+

This is the default service definition:

+
motioneye:
+  image: dontobi/motioneye.rpi:latest
+  container_name: "motioneye"
+  restart: unless-stopped
+  ports:
+    - "8765:8765"
+    - "8766:8081"
+  environment:
+    - TZ=${TZ:-Etc/UTC}
+  volumes:
+    - ./volumes/motioneye/etc_motioneye:/etc/motioneye
+    - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye
+
+

Administrative interface

+

MotionEye's administrative interface is available on port 8765. For example:

+
http://raspberrypi.local:8765
+
+

The default username is admin (all lower case) with no password.

+

Camera streams

+

The first camera you define in the administrative interface is assigned to internal port 8081. The default service definition maps that to port 8766:

+
- "8766:8081"
+
+

You can access the stream with a web browser on port 8766. For example:

+
http://raspberrypi.local:8766
+
+

Each subsequent camera you define in the administrative interface will be assigned a new internal port number:

+
    +
  • Camera 2 will be internal port 8082, then
  • +
  • Camera 3 will be internal port 8083,
  • +
  • and so on.
  • +
+

Each camera you define after the first will need its own port mapping in the service definition in your compose file. For example:

+
- "8767:8082"
+- "8768:8083"
+- 
+
+

Key points:

+
    +
  1. You do not have to make camera streams available outside the container. It is optional.
  2. +
  3. You do not have to accept the default internal port assignments of 8081, 8082 and so on. You can change internal ports in the administrative interface if you wish. If you do this, remember to update the internal (right hand side) ports in the service definition in your compose file.
  4. +
  5. You do not have to adopt the external port sequence 8766, 8767 and so on. Port 8766 is the default for the first camera only because it does not conflict with any other IOTstack template.
  6. +
+

Clip Storage

+

By default local camera data is stored at the internal path:

+
/var/lib/motioneye/«camera_name»
+
+

That maps to the external path:

+
~/IOTstack/volumes/motioneye/var_lib_motioneye/«camera_name»
+
+

Tips:

+
    +
  • The automatic mapping to «camera_name» can be unreliable. After defining a camera, it is a good idea to double-check the actual path in the "Root Directory" field of the "File Storage" section in the administrative interface.
  • +
  • Movie clips are kept forever by default. Depending on other settings, this can quickly run your Pi out of disk space, so it's a good idea to tell MotionEye to discard old footage using the "Preserve Movies" field of the "Movies" section in the administrative interface (a quick way to check current disk usage is shown below).
  • +
+
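
A quick way to check how much space your recorded footage is currently using (sudo may be needed, depending on file ownership):

+
$ sudo du -sh ~/IOTstack/volumes/motioneye/var_lib_motioneye
+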

Backup considerations

+

Although it depends on your exact settings, MotionEye's video storage can represent a significant proportion of your backup files. If you want to constrain your backup files to reasonable sizes, consider excluding the video storage from your routine backups by changing where MotionEye videos are kept. This is one approach:

+
    +
  1. +

    Be in the appropriate directory:

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    Terminate the motioneye container:

    +
    $ docker-compose down motioneye
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  4. +
  5. +

    Move the video storage folder:

    +
    $ sudo mv ./volumes/motioneye/var_lib_motioneye ~/motioneye-videos
    +
    +
  6. +
  7. +

    Open your docker-compose.yml in a text editor. Find this line in your motioneye service definition:

    +
    - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye
    +
    +

    and change it to be:

    +
    - /home/pi/motioneye-videos:/var/lib/motioneye
    +
    +

    then save the edited compose file.

    +
  8. +
  9. +

    Start the container again:

    +
    $ docker-compose up -d motioneye
    +
    +
  10. +
+

This change places video storage outside of the usual ~/IOTstack/volumes path, where IOTstack backup scripts will not see it.

+

An alternative approach is to omit the volume mapping for /var/lib/motioneye entirely. Clips will still be recorded inside the container and you will be able to play and download the footage using the administrative interface. However, any saved clips will disappear each time the container is re-created (not just restarted). Clips stored inside the container also will not form part of any backup.

+

If you choose this method, make sure you configure MotionEye to discard old footage using the "Preserve Movies" field of the "Movies" section in the administrative interface. This is a per-camera setting so remember to do it for all your cameras. If you do not do this, you are still at risk of running your Pi out of disk space, and it's a difficult problem to diagnose.

+

Remote motioneye

+

If you have connected to a remote MotionEye instance, note that the storage directory is on that device and has nothing to do with this container.

\ No newline at end of file diff --git a/Containers/NextCloud/index.html b/Containers/NextCloud/index.html new file mode 100644 index 000000000..280f0b8ee --- /dev/null +++ b/Containers/NextCloud/index.html @@ -0,0 +1,2682 @@

Nextcloud

+

Service definition

+

This is the core of the IOTstack Nextcloud service definition:

+
nextcloud:
+  container_name: nextcloud
+  image: nextcloud
+  restart: unless-stopped
+  environment:
+    - TZ=${TZ:-Etc/UTC}
+    - MYSQL_HOST=nextcloud_db
+    - MYSQL_PASSWORD=%randomMySqlPassword%
+    - MYSQL_DATABASE=nextcloud
+    - MYSQL_USER=nextcloud
+  ports:
+    - "9321:80"
+    - "9343:443"
+  volumes:
+    - ./volumes/nextcloud/html:/var/www/html
+  depends_on:
+    - nextcloud_db
+  networks:
+    - default
+    - nextcloud
+
+nextcloud_db:
+  container_name: nextcloud_db
+  build: ./.templates/mariadb/.
+  restart: unless-stopped
+  environment:
+    - TZ=${TZ:-Etc/UTC}
+    - PUID=1000
+    - PGID=1000
+    - MYSQL_ROOT_PASSWORD=%randomPassword%
+    - MYSQL_PASSWORD=%randomMySqlPassword%
+    - MYSQL_DATABASE=nextcloud
+    - MYSQL_USER=nextcloud
+  volumes:
+    - ./volumes/nextcloud/db:/config
+    - ./volumes/nextcloud/db_backup:/backup
+  networks:
+    - nextcloud
+
+

There are two containers, one for the cloud service itself, and the other for the database. Both containers share the same persistent storage area in the volumes subdirectory so they are treated as a unit. This will not interfere with any other MariaDB containers you might wish to run.

+

Key points:

+
    +
  • You do not need to select MariaDB in the IOTstack menu just to run NextCloud. Some tutorials suggest you do. They are wrong!
  • +
  • If you choose to select MariaDB in the IOTstack menu, understand that it is a separate instance of the relational database management system. It has no relationship with NextCloud.
  • +
+

Under old-menu, you are responsible for setting passwords. The passwords are "internal use only" and it is unlikely that you will need them unless you plan to go ferreting-about in the database using SQL. The rules are:

+
    +
  • The two instances of «user_password» must be the same.
  • +
  • The instance of «root_password» should be different from «user_password».
  • +
+

Under new-menu, the menu can generate random passwords for you. You can either use that feature or roll your own using the old-menu approach by replacing:

+
    +
  • Two instances of %randomMySqlPassword% (the «user_password»)
  • +
  • One instance of %randomPassword% (the «root_password»)
  • +
+

The passwords need to be set before you bring up the Nextcloud service for the first time. However, the following initialisation steps assume you might not have done that and always start from a clean slate.

+
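
If you are rolling your own passwords, any hard-to-guess strings will do. One illustrative way of generating them (an example only, not an IOTstack requirement) is:

+
$ openssl rand -base64 24
+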

Initialising Nextcloud

+
    +
  1. +

    Be in the correct directory:

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    If the stack is running, take it down:

    +
    $ docker-compose down
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  4. +
  5. +

    Erase the persistent storage area for Nextcloud (double-check the command before you hit return):

    +
    $ sudo rm -rf ./volumes/nextcloud
    +
    +

    This is done to force re-initialisation. In particular, it gives you assurance that the passwords in your docker-compose.yml are the ones that are actually in effect.

    +
  6. +
  7. +

    Bring up the stack:

    +
    $ docker-compose up -d
    +
    +
  8. +
  9. +

    Check for errors:

    +

    Repeat the following command two or three times at 10-second intervals:

    +
    $ docker ps
    +
    +

    You are looking for evidence that the nextcloud and nextcloud_db containers are up, stable, and not restarting. If you see any evidence of restarts, try to figure out why using:

    +
    $ docker logs nextcloud
    +
    +
  10. +
  11. +

    On a computer that is not the device running Nextcloud, launch a browser and point to the device running Nextcloud using your chosen connection method. Examples:

    +
    http://192.168.203.200:9321
    +http://myrpi.mydomain.com:9321
    +http://myrpi.local:9321
    +http://myrpi:9321
    +
    +

    The expected result is:

    +

    Create Administrator Account

    +
  12. +
  13. +

    Create an administrator account and then click "Install" and wait for the loading to complete.

    +
  14. +
  15. +

    Eventually, the dashboard will appear. Then the dashboard will be obscured by the "Nextcloud Hub" floating window which you can dismiss:

    +

    Post-initialisation

    +
  16. +
  17. +

    Congratulations. Your IOTstack implementation of Nextcloud is ready to roll:

    +

    Dashboard

    +
  18. +
+

"Access through untrusted domain"

+
+

If you are reading this because you are staring at an "access through untrusted domain" message then you have come to the right place.

+
+

Let's assume the following:

+
    +
  • You used raspi-config to give your Raspberry Pi the name "myrpi".
  • +
  • Your Raspberry Pi has the fixed IP address "192.168.203.200" (via either a static binding in your DHCP server or a static IP address on your Raspberry Pi).
  • +
+

Out of the box, a Raspberry Pi participates in multicast DNS so it will also have the mDNS name:

+
    +
  • "myrpi.local"
  • +
+

Let's also assume you have a local Domain Name System server where your Raspberry Pi:

+
    +
  • has the canonical name (A record) "myrpi.mydomain.com"; plus
  • +
  • an alias (CNAME record) of "nextcloud.mydomain.com".
  • +
+

Rolling all that together, you would expect your Nextcloud service to be reachable at any of the following URLs:

+
    +
  • http://192.168.203.200:9321
  • +
  • http://myrpi.local:9321
  • +
  • http://myrpi.mydomain.com:9321
  • +
  • http://nextcloud.mydomain.com:9321
  • +
+

To tell Nextcloud that all of those URLs are valid, you need to use sudo and your favourite text editor to edit this file:

+
~/IOTstack/volumes/nextcloud/html/config/config.php
+
+

Hint:

+
    +
  • +

    It is a good idea to make a backup of any file before you edit it. For example:

    +
    $ cd ~/IOTstack/volumes/nextcloud/html/config/
    +$ sudo cp config.php config.php.bak
    +
    +
  • +
+

Search for "trusted_domains". To tell Nextcloud to trust all of the URLs above, edit the array structure like this:

+
  'trusted_domains' =>
+  array (
+    0 => '192.168.203.200:9321',
+    1 => 'myrpi.local:9321',
+    2 => 'myrpi.mydomain.com:9321',
+    3 => 'nextcloud.mydomain.com:9321',
+  ),
+
+
+

Note: all the trailing commas are intentional!

+
+

Once you have finished editing the file, save your work then restart Nextcloud:

+
$ cd ~/IOTstack
+$ docker-compose restart nextcloud
+
+

Use docker ps to check that the container has restarted properly and hasn't gone into a restart loop.

+
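
For example, this mirrors the filtered form of docker ps used in the Mosquitto health-check documentation:

+
$ docker ps --format "table {{.Names}}\t{{.Status}}" --filter name=nextcloud
+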

See also:

+ +

Using a DNS alias for your Nextcloud service

+
+

The information in this section may be out of date. Recent tests suggest it is no longer necessary to add a hostname clause to your docker-compose.yml to silence warnings when using DNS aliases to reach your NextCloud service. This section is being left here so you will know what to do if you encounter the problem.

+
+

The examples above include using a DNS alias (a CNAME record) for your Nextcloud service. If you decide to do that, you may see this warning in the log:

+
Could not reliably determine the server's fully qualified domain name
+
+

You can silence the warning by editing the Nextcloud service definition in docker-compose.yml to add your fully-qualified DNS alias using a hostname directive. For example:

+
    hostname: nextcloud.mydomain.com
+
+

Security considerations

+

Nextcloud traffic is not encrypted. Do not expose it to the web by opening a port on your home router. Instead, use a VPN like Wireguard to provide secure access to your home network, and let your remote clients access Nextcloud over the VPN tunnel.

+

The IOTstack service definition for NextCloud reserves port 9343 for HTTPS access but leaves it as an exercise for the reader to figure out how to make it work. You may get some guidance here.

+

Container health check

+

A script, or "agent", to assess the health of the MariaDB container has been added to the local image via the Dockerfile. In other words, the script is specific to IOTstack.

+

Because it is an instance of MariaDB, Nextcloud_DB inherits the health-check agent. See the IOTstack MariaDB documentation for more information.

+

Keeping Nextcloud up-to-date

+

To update the nextcloud container:

+
$ cd ~/IOTstack
+$ docker-compose pull nextcloud
+$ docker-compose up -d nextcloud
+$ docker system prune
+
+

To update the nextcloud_db container:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull nextcloud_db
+$ docker-compose up -d nextcloud_db
+$ docker system prune
+
+
+

You may need to run the prune command twice if you are using a 1.x version of docker-compose.

+
+

Backups

+

Nextcloud is currently excluded from the IOTstack-supplied backup scripts due to its potential size.

+
+

Paraphraser/IOTstackBackup includes backup and restore for NextCloud.

+
+

If you want to take a backup, something like the following will get the job done:

+
$ cd ~/IOTstack
+$ BACKUP_TAR_GZ=$PWD/backups/$(date +"%Y-%m-%d_%H%M").$HOSTNAME.nextcloud-backup.tar.gz
+$ touch "$BACKUP_TAR_GZ"
+$ docker-compose down nextcloud nextcloud_db
+$ sudo tar -czf "$BACKUP_TAR_GZ" -C "./volumes/nextcloud" .
+$ docker-compose up -d nextcloud
+
+

Notes:

+
    +
  • A baseline backup takes over 400MB and about 2 minutes. Once you start adding your own data, it will take even more time and storage.
  • +
  • The up of the NextCloud container implies the up of the Nextcloud_DB container.
  • +
  • See also if downing a container doesn't work
  • +
+

To restore, you first need to identify the name of the backup file by looking in the backups directory. Then:

+
$ cd ~/IOTstack
+$ RESTORE_TAR_GZ=$PWD/backups/2021-06-12_1321.sec-dev.nextcloud-backup.tar.gz
+$ docker-compose down nextcloud nextcloud_db
+$ sudo rm -rf ./volumes/nextcloud/*
+$ sudo tar -x --same-owner -z -f "$RESTORE_TAR_GZ" -C "./volumes/nextcloud"
+$ docker-compose up -d nextcloud
+
+

If you are running from an SD card, it would be a good idea to mount an external drive to store the data. Something like:

+

image

+

The external drive will have to be formatted as ext4 because SMB, FAT32 and NTFS can't handle Linux file permissions. If the ownership isn't set to "www-data" then the container won't be able to write to the disk.

+
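
You can check how your drives are partitioned and formatted with lsblk; the FSTYPE column should report ext4 for the partition you intend to use:

+
$ lsblk -f
+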

Finally, a warning:

+
    +
  • If your database gets corrupted then your Nextcloud is pretty much stuffed.
  • +
+

Network Model

+

A walkthrough of a network model may help you to understand how Nextcloud and its database communicate. To help set the scene, the following model shows a Raspberry Pi with Docker running four containers:

+
    +
  • nextcloud and nextcloud_db - both added when you select "NextCloud"
  • +
  • mariadb - optional container added when you select "MariaDB"
  • +
  • wireguard - optional container added when you select "WireGuard"
  • +
+

Network Model

+

The first thing to understand is that the nextcloud_db and mariadb containers are both instances of MariaDB. They are instantiated from the same image but they have completely separate existences. They have different persistent storage areas (ie databases) and they do not share data.

+

The second thing to understand is how the networks inside the "Docker" rectangle shown in the model are created. The networks section of your compose file defines the networks:

+
networks:
+
+  default:
+    driver: bridge
+    ipam:
+      driver: default
+
+  nextcloud:
+    driver: bridge
+    internal: true
+    ipam:
+      driver: default
+
+

At run time, the lower-case representation of the directory containing the compose file (ie "iotstack") is prepended to the network names, resulting in:

+
    +
  • defaultiotstack_default
  • +
  • nextcloudiotstack_nextcloud
  • +
+

Each network is assigned a /16 IPv4 subnet. Unless you override it, the subnet ranges are chosen at random. This model assumes:

+
    +
  • iotstack_default is assigned 172.18.0.0/16
  • +
  • iotstack_nextcloud is assigned 172.19.0.0/16
  • +
+

The logical router on each network takes the .0.1 address.

+
+

Two octets are devoted to the host address because a /16 network prefix implies a 16-bit host portion, and each octet describes 8 bits.

+
+

As each container is brought up, the network(s) it joins are governed by the following rules:

+
    +
  1. If there is an explicit networks: clause in the container's service definition then the container joins the network(s) listed in the body of the clause; otherwise
  2. +
  3. The container joins the default network.
  4. +
+

Assuming that the mariadb and wireguard containers do not have networks: clauses, the result of applying those rules is shown in the following table.

+

Effect of networks clause

+

Each container is assigned an IPv4 address on each network it joins. In general, the addresses are assigned in the order in which the containers start.

+

No container can easily predict either the network prefix of the networks it joins or the IP address of any other container. However, Docker provides a mechanism for any container to reach any other container with which it shares a network by using the destination container's name.

+

In this model there are two MariaDB instances, one named nextcloud_db and the other named mariadb. How does the nextcloud container know which name to use? Simple. It's passed in an environment variable:

+
environment:
+  - MYSQL_HOST=nextcloud_db
+
+

At runtime, the nextcloud container references nextcloud_db:3306. Docker resolves nextcloud_db to 172.19.0.2 so the traffic traverses the 172.19/16 internal bridged network and arrives at the nextcloud_db container.

+
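
You can watch this name resolution happening from inside the container. The following sketch assumes the getent utility is present in the nextcloud image; the address shown follows this model and your own assignments will almost certainly differ:

+
$ docker exec nextcloud getent hosts nextcloud_db
+172.19.0.2      nextcloud_db
+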

The nextcloud container could reach the mariadb container via mariadb:3306. There's no ambiguity because Docker resolves mariadb to 172.18.0.2, which is a different subnet and an entirely different internal bridged network.

+
+

There would still be no ambiguity even if all containers attached to the iotstack_default network because each container name still resolves to a distinct IP address.

+
+

In terms of external ports, only mariadb exposes port 3306. Any external process trying to reach 192.168.203.60:3306 will always be port-forwarded to the mariadb container. The iotstack_nextcloud network is declared "internal" which means it is unreachable from beyond the Raspberry Pi. Any port-mappings associated with that network are ignored.

\ No newline at end of file diff --git a/Containers/Node-RED/index.html b/Containers/Node-RED/index.html new file mode 100644 index 000000000..a951afd90 --- /dev/null +++ b/Containers/Node-RED/index.html @@ -0,0 +1,4600 @@

Node-RED

+

References

+ +

Significant files

+
~/IOTstack
+├── .templates
+│   └── nodered
+│       └── service.yml ❶
+├── services
+│   └── nodered
+│       ├── Dockerfile ❷
+│       └── service.yml ❸
+├── docker-compose.yml ❹
+└── volumes
+    └── nodered ❺
+        ├── data ❻
+        └── ssh ❼
+
+
    +
  1. Template service definition.
  2. +
  3. The Dockerfile.
  4. +
  5. Working service definition (old-menu only, copied from ❶).
  6. +
  7. The Compose file (includes ❶)
  8. +
  9. Persistent storage area.
  10. +
  11. Data directory (mapped volume).
  12. +
  13. SSH directory (mapped volume).
  14. +
+

How Node-RED gets built for IOTstack

+

Node-RED source code (GitHub)

+

The source code for Node-RED lives at GitHub node-red/node-red-docker.

+

Node-RED images (DockerHub)

+

Periodically, the source code is recompiled and pushed to nodered/node-red on DockerHub. See Node-RED and node.js versions for an explanation of the versioning tags associated with images on DockerHub.

+

IOTstack menu

+

When you select Node-RED in the IOTstack menu, the template service definition is copied into the Compose file.

+
+

Under old menu, it is also copied to the working service definition and then not really used.

+
+

You choose add-on nodes from a supplementary menu. We recommend accepting the default nodes, and adding others that you think you are likely to need. Node-RED will not build if you do not select at least one add-on node.

+

Key points:

+
    +
  • Under new menu, you must press the right arrow to access the supplementary menu. Under old menu, the list of add-on nodes is displayed automatically.
  • +
  • Do not be concerned if you can't find an add-on node you need in the list. You can also add nodes via Manage Palette once Node-RED is running. See component management.
  • +
+

Choosing add-on nodes in the menu causes the Dockerfile to be created.

+

IOTstack first run

+

On a first install of IOTstack, you are told to do this:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

docker-compose reads the Compose file. When it arrives at the nodered service definition, it finds:

+
1
+2
+3
+4
+5
+6
+7
  nodered:
+    container_name: nodered
+    build:
+      context: ./services/nodered/.
+      args:
+      - DOCKERHUB_TAG=latest
+      - EXTRA_PACKAGES=
+
+

Note:

+
    +
  • +

    Prior to July 2022, IOTstack used the following one-line syntax for the build directive:

    +
    3
        build: ./services/nodered/.
    +
    +

    The older syntax meant all local customisations (version-pinning and adding extra packages) needed manual edits to the Dockerfile. Those edits would be overwritten each time the menu was re-run to alter the selected add-on nodes. The newer multi-line syntax avoids that problem.

    +

    See also updating to July 2022 syntax.

    +
  • +
+

In either case, the path ./services/nodered/. tells docker-compose to look for:

+
~/IOTstack/services/nodered/Dockerfile
+
+

which contains instructions to download a base image from DockerHub and then apply local customisations such as the add-on nodes you chose in the IOTstack menu. The result is a local image which is instantiated to become your running container.

+

Notes:

+
    +
  1. During the build you may see warnings and deprecation notices. You may also see messages about "vulnerabilities" along with recommendations to run npm audit fix. You should ignore all such messages. There is no need to take any action.
  2. +
  3. +

    If SQLite is in your list of nodes, be aware that it needs to be compiled from its source code. It takes a long time, outputs an astonishing number of warnings and, from time to time, will look as if it has gotten stuck. Be patient.

    +
    +

    Acknowledgement: Successful installation of the SQLite node is thanks to @fragolinux.

    +
    +
  4. +
+

When you run the docker images command after Node-RED has been built, you may see two rows for Node-RED:

+
$ docker images
+REPOSITORY               TAG                 IMAGE ID            CREATED             SIZE
+iotstack_nodered         latest              b0b21a97b8bb        4 days ago          462MB
+nodered/node-red         latest              deb99584fa75        5 days ago          385MB
+
+
    +
  • nodered/node-red is the base image; and
  • +
  • iotstack_nodered is the local image. The local image is the one that is instantiated to become the running container.
  • +
+

You may see the same pattern in Portainer, which reports the base image as "unused":

+

nodered-portainer-unused-image

+

You should not remove the base image, even though it appears to be unused.

+
+

Whether you see one or two rows depends on the version of Docker you are using and how your version of docker-compose builds local images.

+
+

Securing Node-RED

+

Setting an encryption key for your credentials

+

After you install Node-RED, you should set an encryption key. Completing this step will silence the warning you will see when you run:

+
$ docker logs nodered
+
+---------------------------------------------------------------------
+Your flow credentials file is encrypted using a system-generated key.
+
+If the system-generated key is lost for any reason, your credentials
+file will not be recoverable, you will have to delete it and re-enter
+your credentials.
+
+You should set your own key using the 'credentialSecret' option in
+your settings file. Node-RED will then re-encrypt your credentials
+file using your chosen key the next time you deploy a change.
+---------------------------------------------------------------------
+
+
+

Setting an encryption key also means that any credentials you create will be portable, in the sense that you can backup Node-RED on one machine and restore it on another.

+

The encryption key can be any string. For example, if you have UUID support installed (sudo apt install -y uuid-runtime), you could generate a UUID as your key:

+
$ uuidgen
+2deb50d4-38f5-4ab3-a97e-d59741802e2d
+
+

Once you have defined your encryption key, use sudo and your favourite text editor to open this file:

+
~/IOTstack/volumes/nodered/data/settings.js
+
+

Search for credentialSecret:

+
    //credentialSecret: "a-secret-key",
+
+

Un-comment the line and replace a-secret-key with your chosen key. Do not remove the comma at the end of the line. The result should look something like this:

+
    credentialSecret: "2deb50d4-38f5-4ab3-a97e-d59741802e2d",
+
+

Save the file and then restart Node-RED:

+
$ cd ~/IOTstack
+$ docker-compose restart nodered
+
+

Setting a username and password for Node-RED

+

To secure Node-RED you need a password hash. Run the following command, replacing PASSWORD with your own password:

+
$ docker exec nodered node -e "console.log(require('bcryptjs').hashSync(process.argv[1], 8));" PASSWORD
+
+

You will get an answer that looks something like this:

+
$2a$08$gTdx7SkckJVCw1U98o4r0O7b8P.gd5/LAPlZI6geg5LRg4AUKuDhS
+
+

Copy that text to your clipboard, then follow the instructions at Node-RED User Guide - Securing Node-RED - Username & Password-based authentication.

+

Referring to other containers

+

Node-RED can run in two modes. By default, it runs in "non-host mode" but you can also move the container to "host mode" by editing the Node-RED service definition in your Compose file to:

+
    +
  1. +

    Add the following directive:

    +
    network_mode: host
    +
    +
  2. +
  3. +

    Remove the ports directive and the mapping of port 1880.

    +
  4. +
+

When Node-RED is not in host mode

+

Most examples on the web assume Node-RED and other services in the MING (Mosquitto, InfluxDB, Node-RED, Grafana) stack have been installed natively, rather than in Docker containers. Those examples typically include the loopback address + port syntax, like this:

+
127.0.0.1:1883
+
+

The loopback address will not work when Node-RED is in non-host mode. This is because each container behaves like a self-contained computer. The loopback address means "this container". It does not mean "this Raspberry Pi".

+

You refer to other containers by their container name. For example, a flow subscribing to an MQTT feed provided by the mosquitto container uses:

+
mosquitto:1883
+
+

Similarly, if a flow writes to an InfluxDB database maintained by the influxdb container, the flow uses:

+
influxdb:8086
+
+

Behind the scenes, Docker maintains a table, similar to an /etc/hosts file, mapping container names to the IP addresses on the internal bridged network that are assigned, dynamically, by Docker, when it spins up each container.

+
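
You can inspect that mapping from the host. This sketch assumes jq is installed and that your compose project directory is ~/IOTstack, so the default network is named iotstack_default:

+
$ docker network inspect iotstack_default | jq '.[0].Containers[] | {Name, IPv4Address}'
+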

When Node-RED is in host mode

+

This is where you use loopback+port syntax, such as the following to communicate with Mosquitto:

+
127.0.0.1:1883
+
+

What actually occurs is that Docker is listening to external port 1883 on behalf of Mosquitto. It receives the packet and routes it (layer three) to the internal bridged network, performing network address translation (NAT) along the way to map the external port to the internal port. Then the packet is delivered to Mosquitto. The reverse happens when Mosquitto replies. It works but is less efficient than when all containers are in non-host mode.

+

Referring to the host

+

When the container is running in non-host mode, there are several ways in which it can refer to the host on which the container is running:

+
    +
  1. via the IP address of one of the host's interfaces;
  2. +
  3. via the fully-qualified domain name of the host (ie same as the above but via the Domain Name System)
  4. +
  5. via the default gateway on the Docker bridge network.
  6. +
+

The problem with the first two is that they tie your flows to the specific host.

+

The third method is portable, meaning a flow can conceptually refer to "this" host and be independent of the actual host on which the container is running.

+

Bridge network - default gateway

+
    +
  • +

    Method 1

    +

    The default gateway on the Docker bridge network is usually "172.17.0.1". You can confirm the IP address by running:

    +
    $ docker network inspect bridge | jq .[0].IPAM.Config[0].Gateway
    +"172.17.0.1"
    +
    +
    +

    If jq is not installed on your system, you can install it by running sudo apt install -y jq.

    +
    +

    If you use this method, your flows can refer to "this" host using the IP address "172.17.0.1".

    +
  • +
  • +

    Method 2

    +

    Alternatively, you can add the following lines to your Node-RED service definition:

    +
    extra_hosts:
    +- "host.docker.internal:host-gateway"
    +
    +

    If you use this method, your flows can refer to "this" host using the domain name "host.docker.internal".

    +

    Generally the second method is recommended for IOTstack. That is because your flows will continue to work even if the 172.17.0.1 IP address changes. However, it does come with the disadvantage that, if you publish a flow containing this domain name, the flow will not work unless the recipient also adds the extra_hosts clause.

    +
  • +
+

GPIO Access

+

To communicate with your Raspberry Pi's GPIO you need to do the following:

+
    +
  1. +

    Install dependencies:

    +
    $ sudo apt update
    +$ sudo apt install pigpio python-pigpio python3-pigpio
    +
    +

    Notes:

    +
      +
    • pigpio and python3-pigpio are usually installed by default in standard releases of Raspberry Pi OS.
    • +
    • Only pigpio is actually required.
    • +
    • The Python packages are optional.
    • +
    +
  2. +
  3. +

    Install the node-red-node-pi-gpiod node. See component management. It allows you to connect to multiple Pis from the same Node-RED service.

    +

    Note:

    +
      +
    • Unless you explicitly removed node-red-node-pi-gpiod from the list of add-on nodes added to your Dockerfile by the IOTstack menu, it will be installed already. You can confirm this by examining your Node-RED Dockerfile ❷.
    • +
    +
  4. +
  5. +

    Configure the pigpiod daemon:

    +
      +
    • +

      copy the following text to the clipboard:

      +
      1
      +2
      +3
      +4
      +5
      +6
      +7
      +8
      +9
      [Unit]
      +Requires=default.target
      +After=default.target
      +[Service]
      +ExecStart=
      +ExecStart=/usr/bin/pigpiod
      +[Install]
      +WantedBy=
      +WantedBy=default.target
      +
      +
      +

      Acknowledgement: some of the above from joan2937/pigpio issue 554

      +
      +
       • execute the following commands:

         $ sudo systemctl stop pigpiod
         $ sudo systemctl revert pigpiod
         $ sudo systemctl edit pigpiod
       • follow the on-screen instructions and paste the contents of the clipboard into the blank area between the lines. The final result should be (lines 4…12 are the pasted material):

          1   ### Editing /etc/systemd/system/pigpiod.service.d/override.conf
          2   ### Anything between here and the comment below will become the new contents of the file
          3
          4   [Unit]
          5   Requires=default.target
          6   After=default.target
          7   [Service]
          8   ExecStart=
          9   ExecStart=/usr/bin/pigpiod
         10   [Install]
         11   WantedBy=
         12   WantedBy=default.target
         13
         14   ### Lines below this comment will be discarded
         15
         16   ### /lib/systemd/system/pigpiod.service
         17   # [Unit]
         18   # Description=Daemon required to control GPIO pins via pigpio
         19   # [Service]
         20   # ExecStart=/usr/bin/pigpiod -l
         21   # ExecStop=/bin/systemctl kill pigpiod
         22   # Type=forking
         23   # [Install]
         24   # WantedBy=multi-user.target
       • Save your work by pressing:

           • control+O (letter O, not zero)
           • return
           • control+X

       • Check your work by running:

         $ sudo systemctl cat pigpiod

         The expected result is:

          1   # /lib/systemd/system/pigpiod.service
          2   [Unit]
          3   Description=Daemon required to control GPIO pins via pigpio
          4   [Service]
          5   ExecStart=/usr/bin/pigpiod -l
          6   ExecStop=/bin/systemctl kill pigpiod
          7   Type=forking
          8   [Install]
          9   WantedBy=multi-user.target
         10
         11   # /etc/systemd/system/pigpiod.service.d/override.conf
         12   [Unit]
         13   Requires=default.target
         14   After=default.target
         15   [Service]
         16   ExecStart=
         17   ExecStart=/usr/bin/pigpiod
         18   [Install]
         19   WantedBy=
         20   WantedBy=default.target

         Lines 12…20 should be those you copied to the clipboard at the start of this step. If you do not see the expected result, go back and start from the beginning of this step.
       • Activate the daemon:

         $ sudo systemctl enable pigpiod
         $ sudo systemctl start pigpiod

       • Reboot.

       • Check that the daemon is running:

         $ sudo systemctl status pigpiod

         Once you have configured pigpiod correctly and it has come up after a reboot, you should not need to worry about it again. A further sanity check using the pigs utility is shown after this list.

         pigpiod provides open access to your Raspberry Pi's GPIO via port 8888. Consult the man pages if you want to make it more secure. Once you have decided what to do, start over from the beginning of this step and add your parameters to the line:

         ExecStart=/usr/bin/pigpiod

  4. Drag a pi gpio node onto the canvas. Configure it according to your needs.

     The Host field should be set to one of:

       • 172.17.0.1; or
       • host.docker.internal

     See also Bridge network - default gateway.

     Don't try to use 127.0.0.1 because that is the loopback address of the Node-RED container.
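As a quick sanity check on the host (a sketch, assuming the pigpio package which supplies the pigs command-line utility is installed, as noted in step 1), you can ask the daemon for the Pi's hardware revision:

$ pigs hwver

If pigpiod is running and reachable, this prints a number rather than a "connect failed" error.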

Serial Port Access

Node-RED running in a container can communicate with serial devices attached to your Raspberry Pi's USB ports. However, it does not work "out of the box". You need to set it up.

Let's make an assumption. A device connected to one of your Raspberry Pi's USB ports presents itself as:

/dev/ttyUSB0

You have three basic options:

  1. You can map the device into the container using that name:

     devices:
       - "/dev/ttyUSB0:/dev/ttyUSB0"

     This is simple and effective but it suffers from a few problems:

       • If the device is disconnected while the container is running, there's a good chance the container will crash.
       • docker-compose will not start your container if the device is not present when you bring up your stack.
       • You can't guarantee that the device will always enumerate as "ttyUSB0". It might come up as "ttyUSB1".

     You can deal with the last problem by using the device's "by-id" path. There's an example of this in the Zigbee2MQTT documentation.

     Options 2 and 3 (below) deal with the first two problems in the sense that:

       • a device disconnection is unlikely to cause the container to crash (the flow might); and
       • docker-compose will always start the container, irrespective of whether devices are actually attached to your USB ports.

     Options 2 and 3 (below) can't provide a workaround for devices being given different names via enumeration, but you can still deal with that by using the device's "by-id" path (as explained above).

  2. You can map a class of devices (a consolidated sketch of these clauses is shown after this list):

       • modify the volumes clause to add a read-only mapping for /dev:

         volumes:
           - ./volumes/nodered/data:/data
           - /dev:/dev:ro

         The "read-only" flag (:ro) prevents the container from doing dangerous things like destroying your Raspberry Pi's SD or SSD. Please don't omit that flag!

       • discover the major number for your device:

         $ ls -l /dev/ttyUSB0
         crw-rw---- 1 root dialout 188, 0 Feb 18 15:30 /dev/ttyUSB0

         In the above, the "188, 0" string means the major number for ttyUSB0 is 188 and the minor number is 0.

       • add two device CGroup rules:

         device_cgroup_rules:
           - 'c 1:* rw' # access to devices like /dev/null
           - 'c 188:* rmw' # change numbers to your device

         In the above:

           • "188" is the major number for ttyUSB0; substitute accordingly if your device has a different major number.
           • the "*" is a wildcard for the minor number.

    Use the "privileged" flag by adding the following to your Node-RED service definition:

    +
    privileged: true
    +
    +

    Please make sure you read the following references BEFORE you select this option:

    + +
  6. +
+
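Putting option 2 together, a minimal sketch of how the relevant clauses might sit in the Node-RED service definition (the major number 188 is only an example; substitute the value you discovered above):

nodered:
  volumes:
    - ./volumes/nodered/data:/data
    - /dev:/dev:ro
  device_cgroup_rules:
    - 'c 1:* rw'    # access to devices like /dev/null
    - 'c 188:* rmw' # change numbers to your device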

node-red-node-serialport

+

At the time of writing (Feb 2023), it was not possible to add node-red-node-serialport to the list of nodes in your Dockerfile. Attempting to do so crashed the Node-RED container with a segmentation fault. The workaround is to build the node from source by adding an extra line at the end of your Dockerfile:

+
RUN npm install node-red-node-serialport --build-from-source
+
+

Sharing files between Node-RED and the Raspberry Pi

+

Containers run in a sandboxed environment. A process running inside a container can't see the Raspberry Pi's file system. Neither can a process running outside a container access files inside the container.

+

This presents a problem if you want to write to a file outside a container, then read from it inside the container, or vice versa.

+

IOTstack containers have been set up with shared volume mappings. Each volume mapping associates a specific directory in the Raspberry Pi file system with a specific directory inside the container. If you write to files in a shared directory (or one of its sub-directories), both the host and the container can see the same sub-directories and files.

+

Key point:

+
    +
  • Files and directories in the shared volume are persistent between restarts. If you save your data anywhere else inside the container, it will be lost when the container is rebuilt.
  • +
+

The Node-RED service definition in the Compose file includes the following:

+
volumes:
+  - ./volumes/nodered/data:/data
+
+

That decomposes into:

+
    +
  • external path = ./volumes/nodered/data
  • +
  • internal path = /data
  • +
+

The leading "." on the external path implies "the folder containing the Compose file so it actually means:

+
    +
  • external path = ~/IOTstack/volumes/nodered/data
  • +
  • internal path = /data
  • +
+

If you write to the internal path from inside the Node-RED container, the Raspberry Pi will see the results at the external path, and vice versa. Example:

+
$ docker exec -it nodered bash
+# echo "The time now is $(date)" >/data/example.txt
+# cat /data/example.txt 
+The time now is Thu Apr  1 11:25:56 AEDT 2021
+# exit
+$ cat ~/IOTstack/volumes/nodered/data/example.txt 
+The time now is Thu Apr  1 11:25:56 AEDT 2021
+$ sudo rm ~/IOTstack/volumes/nodered/data/example.txt
+
+

In words:

+
  1. Open a shell into the Node-RED container. Two things happen:

       • You are now inside the container. Any commands you execute while in this shell are run inside the container; and
       • The prompt changes to a "#", indicating that you are running as the "root" user, meaning you don't need sudo for anything.

  2. Use the echo command to create a small file which embeds the current timestamp. The path is in the /data directory, which is mapped to the Raspberry Pi's file system.

  3. Show that the file has been created inside the container.

  4. Exit the shell:

       • You can either type the exit command and press return, or press Control+D.
       • Exiting the shell drops you out of the container, so the "$" prompt returns, indicating that you are outside the Node-RED container, running as a non-root user ("pi").

  5. Show that the same file can be seen from outside the container.

  6. Tidy up by removing the file. You need sudo to do that because the persistent storage area at the external path is owned by root, and you are running as user "pi".


You can do the same thing from within a Node-RED flow.

+

image

+

The flow comprises:

+
    +
  • +

    An Inject node, wired to a Template node.

    +
      +
    • When an Inject node's input tab is clicked, it sets the message payload to the number of seconds since 1/1/1970 UTC and triggers the flow.
    • +
    +
  • +
  • +

    A Template node, wired to both a Debug node and a File node. The template field is set to:

    +
    The time at the moment is {{payload}} seconds since 1/1/1970 UTC !
    +
    +
      +
    • When this node runs, it replaces {{payload}} with the seconds value supplied by the Inject node.
    • +
    +
  • +
  • +

    A Debug node.

    +
      +
    • When this node runs, it displays the payload in the debug window on the right hand side of the Node-RED GUI.
    • +
    +
  • +
  • +

    A File node. The "Filename" field of the node is set to write to the path:

    +
    /data/flow-example.txt
    +
    +
      +
    • When this node runs, it writes the payload to the specified file. Remember that /data is an internal path within the Node-RED container.
    • +
    +
  • +
+

Deploying the flow and clicking on the Inject node results in the debug message shown on the right hand side of the screen shot. The embedded terminal window shows that the same information is accessible from outside the container.

+

You can reverse this process. Any file you place within the path ~/IOTstack/volumes/nodered/data can be read by a "File in" node.
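For example, a file created from the host side (the filename is just an illustration):

$ echo "hello from the host" > ~/IOTstack/volumes/nodered/data/host-example.txt

can then be read by a "File in" node configured with the filename /data/host-example.txt.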

+

Executing commands outside the Node-RED container

+

A reasonably common requirement in a Node-RED flow is the ability to execute a command on the host system. The standard tool for this is an "exec" node.

+

An "exec" node works as expected when Node-RED is running as a native service but not when Node-RED is running in a container. That's because the command spawned by the "exec" node runs inside the container.

+

To help you understand the difference, consider this command:

+
$ grep "^PRETTY_NAME=" /etc/os-release
+
+

When you run that command on a Raspberry Pi outside container-space, the answer will be something like:

+
PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
+
+

If you run the same command inside a Node-RED container, the output will reflect the operating system upon which the container is based, such as:

+
PRETTY_NAME="Alpine Linux v3.16"
+
+

The same thing will happen if a Node-RED "exec" node executes that grep command when Node-RED is running in a container. It will see the "Alpine Linux" answer.

+

Docker doesn't provide any mechanism for a container to execute an arbitrary command outside of its container. A workaround is to utilise SSH. The remainder of this section explains how to set up the SSH scaffolding so that "exec" nodes running in a Node-RED container can invoke arbitrary commands outside container-space.

+

Task Goal

+

Be able to use a Node-RED "exec" node to perform the equivalent of:

+
$ ssh host.docker.internal «COMMAND»
+
+

where «COMMAND» is any command known to the target host.

+

This section uses host.docker.internal throughout. That name comes from method 2 of bridge network - default gateway but, in principle, you can refer to the host using any mechanism described in referring to the host.

+

Assumptions

+
    +
  • SensorsIot/IOTstack is installed on your Raspberry Pi.
  • +
  • The Node-RED container is running.
  • +
  • The user name of the account on the host where you want Node-RED flows to be able to run commands is "pi". This user name is not mandatory. Simply substitute your own user name wherever you see "pi" in these examples.
  • +
+

These instructions are specific to IOTstack but the underlying concepts should apply to any installation of Node-RED in a Docker container.

+

Executing commands "inside" a container

+

These instructions make frequent use of the ability to run commands "inside" the Node-RED container. For example, suppose you want to execute:

+
$ grep "^PRETTY_NAME=" /etc/os-release
+
+

You have several options:

+
    +
  1. +

    You can do it from the normal Raspberry Pi command line using a Docker command. The basic syntax is:

    +
    $ docker exec {-it} «containerName» «command and parameters»
    +
    +

    The actual command you would need would be:

    +
    $ docker exec nodered grep "^PRETTY_NAME=" /etc/os-release
    +
    +

    Note:

    +
      +
    • The -it flags are optional. They mean "interactive" and "allocate pseudo-TTY". Their presence tells Docker that the command may need user interaction, such as entering a password or typing "yes" to a question.
    • +
    +
  2. +
  3. +

    You can open a shell into the container, run as many commands as you like inside the container, and then exit. For example:

    +
    $ docker exec -it nodered bash
    +# grep "^PRETTY_NAME=" /etc/os-release
    +# whoami
    +# exit
    +$
    +
    +

    In words:

    +
      +
    • Run the bash shell inside the Node-RED container. You need to be able to interact with the shell to type commands so the -it flag is required.
    • +
    • The "#" prompt is coming from bash running inside the container. It also signals that you are running as the root user inside the container.
    • +
    • You run the grep, whoami and any other commands.
    • +
    • You finish with the exit command (or Control+D).
    • +
    • The "$" prompt means you have left the container and are back at the normal Raspberry Pi command line.
    • +
    +
  4. +
  5. +

    Run the command from Portainer by selecting the container, then clicking the ">_ console" link. This is identical to opening a shell.

    +
  6. +
+

Step 1: Generate SSH key-pair for Node-RED (one time)

+

Create a key-pair for Node-RED. This is done by executing the ssh-keygen command inside the container:

+
$ docker exec -it nodered ssh-keygen -q -t ed25519 -C "Node-RED container key-pair" -N ""
+
+

Notes:

+
    +
  • The "ed25519" elliptic curve algorithm is recommended (generally described as quicker and more secure than RSA) but you can use the default RSA algorithm if you prefer.
  • +
  • Respond to the "Enter file in which to save the key" prompt by pressing return to accept the default location.
  • +
  • If ssh-keygen displays an "Overwrite (y/n)?" message, it implies that a key-pair already exists. You will need to decide what to do:
      +
    • press y to overwrite (and lose the old keys)
    • +
    • press n to terminate the command, after which you can investigate why a key-pair already exists.
    • +
    +
  • +
+
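If you want to confirm where the new key-pair landed, it should appear in the container's persistent SSH directory (the same location discussed under "What files are where" below):

$ ls -l ~/IOTstack/volumes/nodered/ssh/

You should see id_ed25519 and id_ed25519.pub.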

Step 2: Exchange keys with target hosts (once per target host)

+

Node-RED's public key needs to be copied to the "pi" user account on the host where you want a Node-RED "exec" node to be able to execute commands. At the same time, the Node-RED container needs to learn the host's public key. The ssh-copy-id command does both steps. The command is:

+
$ docker exec -it nodered ssh-copy-id pi@host.docker.internal
+
+

The output will be something similar to the following:

+
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_ed25519.pub"
+The authenticity of host 'host.docker.internal (172.17.0.1)' can't be established.
+ED25519 key fingerprint is SHA256:gHMlhvArbUPJ807vh5qNEuyRCeNUQQTKEkmDS6qKY6c.
+This key is not known by any other names
+Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
+
+

Respond to the prompt by typing "yes" and pressing return.

+

The output continues:

+

/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
expr: warning: '^ERROR: ': using '^' as the first character
of a basic regular expression is not portable; it is ignored
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
pi@host.docker.internal's password: 

The response may look like it contains errors but those can be ignored.

+

Enter the password you use to login as "pi" on the host and press return.

+

Normal completion looks similar to this:

+
Number of key(s) added: 1
+
+Now try logging into the machine, with:   "ssh 'pi@host.docker.internal'"
+and check to make sure that only the key(s) you wanted were added.
+
+

If you do not see an indication that a key has been added, you may need to retrace your steps.

+

Step 3: Perform the recommended test

+

The output above recommends a test. The test needs to be run inside the Node-RED container so the syntax is:

+
$ docker exec -it nodered ssh pi@host.docker.internal ls -1 /home/pi/IOTstack
+
+

You should not be prompted for a password. If you are, you may need to retrace your steps.

+

If everything works as expected, you should see a list of the files in your IOTstack folder.

+

Assuming success, think about what just happened: you told SSH inside the Node-RED container to run the ls command outside the container on your Raspberry Pi. You broke through the containerisation.

+

Understanding what's where and what each file does

+

What files are where

+

Six files are relevant to Node-RED's ability to execute commands outside of container-space:

+
    +
  • +

    in /etc/ssh:

    +
      +
    • ssh_host_ed25519_key is the Raspberry Pi's private host key
    • +
    • +

      ssh_host_ed25519_key.pub is the Raspberry Pi's public host key

      +

      Those keys were created when your Raspberry Pi was initialised. They are unique to the host.

      +

      Unless you take precautions, those keys will change whenever your Raspberry Pi is rebuilt from scratch and that will prevent a Node-RED "exec" node from being able to invoke SSH to call out of the container.

      +

      You can recover by re-running ssh-copy-id.

      +
    • +
    +
  • +
  • +

    in ~/IOTstack/volumes/nodered/ssh:

    +
      +
    • id_ed25519 is the Node-RED container's private key
    • +
    • +

      id_ed25519.pub is the Node-RED container's public key

      +

      Those keys were created when you generated the SSH key-pair for Node-RED.

      +

      They are unique to Node-RED but will follow the container in backups and will work on the same machine, or other machines, if you restore the backup.

      +

      It does not matter if the Node-RED container is rebuilt or if a new version of Node-RED comes down from DockerHub. These keys will remain valid until lost or overwritten.

      +

      If you lose or destroy these keys, that will prevent a Node-RED "exec" node from being able to invoke SSH to call out of the container.

      +

      You can recover by generating new keys and then re-running ssh-copy-id.

      +
    • +
    • +

      known_hosts

      +

      The known_hosts file contains a copy of the Raspberry Pi's public host key. It was put there by ssh-copy-id.

      +

      If you lose this file or it gets overwritten, invoking SSH inside the container will still work but it will re-prompt for authorisation to connect. You will see the prompt if you run commands via docker exec -it but not when invoking SSH from an "exec" node.

      +

      Note that authorising the connection at the command line ("Are you sure you want to continue connecting?") will auto-repair the known_hosts file.

      +
    • +
    +
  • +
  • +

    in ~/.ssh/:

    +
      +
    • +

      authorized_keys

      +

      That file contains a copy of the Node-RED container's public key. It was put there by ssh-copy-id.

      +

      Pay attention to the path. It implies that there is one authorized_keys file per user, per target host.

      +

      If you lose this file or it gets overwritten, SSH will still work but will ask for the password for "pi". This works when you are running commands from docker exec -it but not when invoking SSH from an "exec" node.

      +

      Note that providing the correct password at the command line will auto-repair the authorized_keys file.

      +
    • +
    +
  • +
+

What each file does

+

SSH running inside the Node-RED container uses the Node-RED container's private key to provide assurance to SSH running outside the container that it (the Node-RED container) is who it claims to be.

+

SSH running outside container-space verifies that assurance by using its copy of the Node-RED container's public key in authorized_keys.

+

SSH running outside container-space uses the Raspberry Pi's private host key to provide assurance to SSH running inside the Node-RED container that it (the RPi) is who it claims to be.

+

SSH running inside the Node-RED container verifies that assurance by using its copy of the Raspberry Pi's public host key stored in known_hosts.

+

Config file (optional)

+

You don't have to do this step but it will simplify your exec node commands and reduce your maintenance problems if you do.

+

At this point, SSH commands can be executed from inside the container using this syntax:

+
# ssh pi@host.docker.internal «COMMAND»
+
+

A config file is needed to achieve the task goal of the simpler syntax:

+
# ssh host.docker.internal «COMMAND»
+
+

The goal is to set up this file:

+
-rw-r--r-- 1 root root ~/IOTstack/volumes/nodered/ssh/config
+
+

The file needs the ownership and permissions shown. There are several ways of going about this and you are free to choose the one that works for you. The method described here creates the file first, then sets correct ownership and permissions, and then moves the file into place.

+

Start in a directory where you can create a file without needing sudo. The IOTstack folder is just as good as anywhere else:

+
$ cd ~/IOTstack
+$ touch config
+
+

Select the following text, copy it to the clipboard.

+
host host.docker.internal
+  user pi
+  IdentitiesOnly yes
+  IdentityFile /root/.ssh/id_ed25519
+
+

Open ~/IOTstack/config in your favourite text editor and paste the contents of the clipboard. Save the file. Change the config file's ownership and permissions, and move it into the correct directory:

+
$ chmod 644 config
+$ sudo chown root:root config
+$ sudo mv config ./volumes/nodered/ssh
+
+
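You can verify that the result matches the target ownership and permissions with:

$ ls -l ~/IOTstack/volumes/nodered/ssh/config

The listing should show -rw-r--r-- with owner and group root.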

Re-test with config file in place

+

The previous test used this syntax:

+
$ docker exec nodered ssh pi@host.docker.internal ls -1 /home/pi/IOTstack
+
+

Now that the config file is in place, the syntax changes to:

+
$ docker exec nodered ssh host.docker.internal ls -1 /home/pi/IOTstack
+
+

The result should be the same as the earlier test.

+

A test flow

+

node-red-exec-node-ssh-test

+

In the Node-RED GUI:

  1. Click the "+" to create a new, empty flow.

  2. Drag the following nodes onto the canvas:

       • one "inject" node
       • two "exec" nodes
       • two "debug" nodes

  3. Wire the outlet of the "inject" node to the inlet of both "exec" nodes.

  4. Wire the uppermost "stdout" outlet of the first "exec" node to the inlet of the first "debug" node.

  5. Repeat step 4 with the other "exec" and "debug" nodes.

  6. Open the first "exec" node and:

       • set the "command" field to:

         grep "^PRETTY_NAME=" /etc/os-release

       • turn off the "append msg.payload" checkbox
       • set the timeout to a reasonable value (eg 10 seconds)
       • click "Done".

  7. Repeat step 6 with the other "exec" node, with one difference: set the "command" field to:

         ssh host.docker.internal grep "^PRETTY_NAME=" /etc/os-release

  8. Click the Deploy button.

  9. Set the right-hand panel to display debug messages.

  10. Click the touch panel of the "inject" node to trigger the flow.

  11. Inspect the result in the debug panel. You should see payload differences similar to the following:

      PRETTY_NAME="Alpine Linux v3.16"
      PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"

      The first line is the result of running the command inside the Node-RED container. The second line is the result of running the same command outside the Node-RED container on the Raspberry Pi.


Maintaining Node-RED

+

Starting Node-RED

+

Use these commands to:

+
    +
  1. Start the container; or
  2. +
  3. Re-create the container if you have made a material change to the container's service definition in your Compose file.
  4. +
+
$ cd ~/IOTstack
+$ docker-compose up -d nodered
+
+

The first time you execute this command, the base image of Node-RED is downloaded from DockerHub, and then the Dockerfile is run to produce a local image. The local image is then instantiated to become the running container.

+

Stopping Node-RED

+

To stop the running container:

+
$ cd ~/IOTstack
+$ docker-compose down nodered
+
+
+

see also if downing a container doesn't work

+
+

Alternatively, you can stop the entire stack:

+
$ cd ~/IOTstack
+$ docker-compose down
+
+

Restarting Node-RED

+

The restart command sends a signal to the processes running within the container. The container itself does not stop.

+
$ cd ~/IOTstack
+$ docker-compose restart nodered
+
+

Re-building the local image

+

You need to rebuild the local image if you do any of the following:

+
    +
  1. Change either of the build arguments (DOCKERHUB_TAG or EXTRA_PACKAGES) in your Compose file.
  2. +
  3. Make a material change to your Node-RED Dockerfile, such as re-running the menu to change your selection of add-on nodes.
  4. +
+

To rebuild your local image:

+
$ cd ~/IOTstack
+$ docker-compose up --build -d nodered
+$ docker system prune
+
+

Think of these commands as "re-running the Dockerfile". The only time a base image will be downloaded from DockerHub is when a base image with a tag matching the value of DOCKERHUB_TAG can't be found on your Raspberry Pi.

+

Your existing Node-RED container continues to run while the rebuild proceeds. Once the freshly-built local image is ready, the up tells docker-compose to do a new-for-old swap. There is barely any downtime for your Node-RED service.

+

Checking for Node-RED updates

+

IOTstack provides a convenience script which can help you work out if a new version of Node-RED is available. You can run it like this:

+
$ ~/IOTstack/scripts/nodered_version_check.sh
+
+

The script is not infallible. It works by comparing the version number in the Node-RED image on your system with a version number stored on GitHub.

+

GitHub is always updated before a new image appears on DockerHub. Sometimes there is a delay of weeks between the two events. For that reason, the script should be viewed more like a meteorological forecast than hard fact.

+

The script assumes that your local image builds as iotstack-nodered:latest. If you use different tags, you can pass that information to the script. Example:

+
$ ~/IOTstack/scripts/nodered_version_check.sh iotstack-nodered:3.0.2
+
+

Upgrading Node-RED

+

The only way to know, for certain, when an update to Node-RED is available is to check the nodered/node-red tags page on DockerHub.

+

Once a new version appears on DockerHub, you can upgrade Node-RED like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull nodered
+$ docker-compose up -d nodered
+$ docker system prune
+
+

Breaking it down into parts:

+
    +
  • build causes the named container to be rebuilt;
  • +
  • --no-cache tells the Dockerfile process that it must not take any shortcuts. It really must rebuild the local image;
  • +
  • --pull tells the Dockerfile process to actually check with DockerHub to see if there is a later version of the base image and, if so, to download it before starting the build;
  • +
  • nodered is the named container argument required by the build command.
  • +
+

Your existing Node-RED container continues to run while the rebuild proceeds. Once the freshly-built local image is ready, the up tells docker-compose to do a new-for-old swap. There is barely any downtime for your Node-RED service.

+

The prune is the simplest way of cleaning up old images. Sometimes you need to run this twice, the first time to clean up the old local image, the second time for the old base image. Whether an old base image exists depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+

Node-RED and node.js versions

+

Checking versions

+

You can use the npm version command to check which versions of Node-RED and node.js are running in your container:

+
$ docker exec nodered npm version
+{
+  'node-red-docker': '2.2.2',
+  npm: '6.14.15',
+  ares: '1.18.1',
+  brotli: '1.0.9',
+  cldr: '37.0',
+  http_parser: '2.9.4',
+  icu: '67.1',
+  llhttp: '2.1.4',
+  modules: '72',
+  napi: '8',
+  nghttp2: '1.41.0',
+  node: '12.22.8',
+  openssl: '1.1.1m',
+  tz: '2019c',
+  unicode: '13.0',
+  uv: '1.40.0',
+  v8: '7.8.279.23-node.56',
+  zlib: '1.2.11'
+}
+
+

In the above:

+
    +
  • 'node-red-docker': '2.2.2' indicates that version 2.2.2 of Node-RED is running. This is the version number you see at the bottom of the main menu when you click on the "hamburger" icon ("≡") at the top, right of the Node-Red window in your browser.
  • +
  • node: '12.22.8' indicates that version 12.x of node.js is installed.
  • +
+

Controlling versions

+

IOTstack uses a service definition for Node-RED that includes these lines:

+
    build:
      context: ./services/nodered/.
      args:
      - DOCKERHUB_TAG=latest
+
+

If you do not see this structure in your Compose file, refer to updating to July 2022 syntax.

+
+

The value of the DOCKERHUB_TAG gives you the ability to control, from your Compose file, which versions of Node-RED and node.js run within your Node-RED container.

+

The allowable values of DOCKERHUB_TAG can be found on the DockerHub Node-RED tags page. The table below contains examples of tags that were available on DockerHub at the time of writing (2022-07-06):

tag        | Node-RED version | node.js version
-----------|------------------|----------------
latest     | 2.2.2            | 14.x
latest-14  | 2.2.2            | 14.x 📌
2.2.2      | 2.2.2 📌         | 14.x
2.2.2-14   | 2.2.2 📌         | 14.x 📌

Interpreting the tag:

+
    +
  1. +

    The sub-string to the left of the hyphen determines the version of Node-RED:

    +
      +
    • "latest" means the most up-to-date version, implying that the actual version number can change any time you follow the process to upgrade Node-RED.
    • +
    • "2.2.2" pins your container to that specific version of Node-RED, implying that the version number will be frozen until you change the pin.
    • +
    +
  2. +
  3. +

    The sub-string to the right of the hyphen determines the version of node.js:

    +
      +
    • "-14" refers to node.js version 14.x and pins your container to that specific version of node.js.
    • +
    • If the hyphen and suffix are omitted, it implies that the actual version of node.js can change any time you follow the process to upgrade Node-RED.
    • +
    +
  4. +
+

In short:

+
    +
  • If you pin both sides to specific values (eg "2.2.2-14") then all decisions about when to upgrade and which versions to use are under your control; but
  • +
  • If you use "latest" then all timing and version decisions are under the control of the maintainers of the DockerHub images.
  • +
+

IOTstack defaults to "latest". Although this appears to cede control to the maintainers of the DockerHub images, in practice it is no different to any other container where you pull its image directly from DockerHub using the latest tag (irrespective of whether latest is explicit or implied by omission).

+

The DOCKERHUB_TAG argument for Node-RED merely gives you the ability to pin to specific versions of Node-RED from within your Compose file, in the same way as you can use tags on image directives for other containers.

+

For example, suppose you wanted to pin to Node-RED version 2.2.2 with node.js version 12:

+
    +
  1. +

    Edit your Compose file so that the DOCKERHUB_TAG looks like this:

    +
    - DOCKERHUB_TAG=2.2.2-12
    +
    +
  2. +
  3. +

    Run the re-building the local Node-RED image commands.

    +
  4. +
+

Changing a pinned version and rebuilding may result in a new base image being downloaded from DockerHub.

+

Component management

+

via Dockerfile

+

You can install components by adjusting the Node-RED Dockerfile. This can be done by:

+
    +
  • Running the IOTstack menu and changing the selected Node-RED nodes; or
  • +
  • Editing your Node-RED Dockerfile using a text editor.
  • +
+

Using the IOTstack menu limits your choice of components to those presented in the menu. Editing the Dockerfile with a text editor is more flexible but carries the risk that your changes could be lost if you subsequently use the menu method.

+

To apply changes made to your Dockerfile, run the re-building the local Node-RED image commands.

+

via Manage Palette

+

You can add, remove or update components in Manage Palette. Node-RED will remind you to restart Node-RED and that is something you have to do by hand:

+
$ cd ~/IOTstack
+$ docker-compose restart nodered
+
+

Note:

+
    +
  • +

    Some users have reported misbehaviour from Node-RED if they do too many iterations of:

    +
      +
    • make a change in Manage Palette
    • +
    • restart Node-RED
    • +
    +

    It is better to make all the changes you intend to make, and only then restart Node-RED.

    +
  • +
+

via npm

+

You can also run npm inside the container to install any component that could be installed by npm in a non-container environment. This is the basic syntax:

+
$ cd ~/IOTstack
+$ docker exec -w /data nodered npm «command» «arguments…»
+$ docker-compose restart nodered
+
+

Examples:

+
    +
  • +

    To add the "find my iphone" node:

    +
    $ docker exec -w /data nodered npm install find-my-iphone-node
    +$ docker-compose restart nodered
    +
    +
  • +
  • +

    To remove the "find my iphone" node:

    +
    $ docker exec -w /data nodered npm uninstall find-my-iphone-node
    +$ docker-compose restart nodered
    +
    +
  • +
+

Note:

+
    +
  • You must include -w /data on each command. Any formula you find on the web will not include this. You have to remember to do it yourself!
  • +
  • Many web examples include the --save flag on the npm command. That flag is not needed (it is ignored because the behaviour it used to control has been the default since NPM version 5, and Node-RED containers have been using NPM version 6 for some time).
  • +
  • See also the note above about restarting too frequently.
  • +
  • +

    You can use this approach if you need to force the installation of a specific version (which you don't appear to be able to do in Manage Palette). For example, to install version 4.0.0 of the "moment" node:

    +
    $ docker exec -w /data nodered npm install node-red-contrib-moment@4.0.0
    +$ docker-compose restart nodered
    +
    +
  • +
+

Comparison of methods

+

In terms of outcome, there is no real difference between the various methods. However, some nodes (eg "node-red-contrib-generic-ble" and "node-red-node-sqlite") must be installed by Dockerfile. The only way of finding out if a component must be installed via Dockerfile is to try Manage Palette and find that it doesn't work.

+

Aside from the exception cases that require Dockerfile or where you need to force a specific version, it is quicker to install nodes via Manage Palette and applying updates is a bit easier too. But it's really up to you.

+

If you're wondering about "backup", nodes installed via:

+
    +
  • Dockerfile – implicitly backed up when the Dockerfile is backed-up.
  • +
  • Manage Palette or npm install – explicitly backed up when the ~/IOTstack/volumes directory is backed-up.
  • +
+

Basically, if you're running IOTstack backups then your add-on nodes will be backed-up.

+

Component precedence

+

Components that are installed via Dockerfile wind up at the internal path:

+
/usr/src/node-red
+
+

Components installed via Manage Palette or docker exec -w /data wind up at the internal path:

+
/data
+
+

which is the same as the external path:

+
~/IOTstack/volumes/nodered/data
+
+

Because there are two places, this invites the question of what happens if a given component is installed in both? The answer is that components installed in /data take precedence.

+

Or, to put it more simply: in any contest between methods, Dockerfile comes last.
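If you want to see where a given component has landed, a simple illustrative check is to list the node_modules sub-directories of both locations (node_modules is where npm places installed components) and look for the component's folder:

$ docker exec nodered ls /usr/src/node-red/node_modules
$ docker exec nodered ls /data/node_modules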

+

Resolving node duplication

+

Sometimes, even when you are 100% certain that you didn't do it, a component will turn up in both places. There is probably some logical reason for this but I don't know what it is.

+

The problem this creates is that a later version of a component installed via Dockerfile will be blocked by the presence of an older version of that component installed by a different method.

+

The nodered_list_installed_nodes.sh script helps discover when this situation exists. For example:

+
$ nodered_list_installed_nodes.sh 
+
+Fetching list of candidates installed via Dockerfile
+
+Components built into the image (via Dockerfile)
+  ACTIVE: node-red-admin
+  ACTIVE: node-red-configurable-ping
+  ACTIVE: node-red-contrib-boolean-logic
+  ACTIVE: node-red-contrib-generic-ble
+  ACTIVE: node-red-contrib-influxdb
+  ACTIVE: node-red-dashboard
+ BLOCKED: node-red-node-email
+  ACTIVE: node-red-node-pi-gpiod
+  ACTIVE: node-red-node-rbe
+  ACTIVE: node-red-node-sqlite
+  ACTIVE: node-red-node-tail
+
+Fetching list of candidates installed via Manage Palette or npm
+
+Components in persistent store at
+ /home/pi/IOTstack/volumes/nodered/data/node_modules
+  node-red-contrib-boolean-logic-ultimate
+  node-red-contrib-chartjs
+  node-red-node-email
+  node-red-contrib-md5
+  node-red-contrib-moment
+  node-red-contrib-pushsafer
+
+

Notice how the node-red-node-email instance installed in the Dockerfile is being blocked. To fix this problem:

+
$ cd ~/IOTstack
+$ docker exec -w /data nodered npm uninstall node-red-node-email
+$ docker-compose restart nodered
+
+

Package management

+

As well as providing the Node-RED service, the nodered container is an excellent testbed. Installing the DNS tools, Mosquitto clients and tcpdump will help you to figure out what is going on inside container-space.

+

There are two ways to add extra packages. The first method is to add them to the running container. For example, to add the Mosquitto clients:

+
$ docker exec nodered apk add --no-cache mosquitto-clients
+
+
+

The "apk" implies that the Node-RED container is based on Alpine Linux. Keep that in mind when you search for instructions on installing packages.

+
+

Packages installed this way will persist until the container is re-created (eg a down and up of the stack, or a reboot of your Raspberry Pi). This is a good choice if you only want to run a quick experiment.

+

The second method adds the packages to your local image every time you rebuild. Because the packages are in the local image, they are always in the running container. For example, to include the Mosquitto clients in every build:

+
    +
  1. +

    Edit your Compose file to include the package on the EXTRA_PACKAGES argument:

    +
    - EXTRA_PACKAGES=mosquitto-clients
    +
    +
    +

    If you do not see the EXTRA_PACKAGES argument in your Compose file, refer to updating to July 2022 syntax.

    +
    +
  2. +
  3. +

    Rebuild your local image by running the re-building the local Node-RED image commands.

    +
  4. +
+

You can specify multiple packages on the same line. For example:

+
- EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump
+
+

Notes:

+
    +
  1. Use a space to separate package names.
  2. +
  3. Do not encapsulate the list in quote marks.
  4. +
+

Updating to July 2022 syntax

+

The primary benefit of the new syntax is that you no longer risk the IOTstack menu overwriting any custom changes you may have made to your Node-RED Dockerfile.

+

If you install a clean copy of IOTstack, run the menu, enable Node-RED and select one or more add-on nodes then both your Compose file and Dockerfile will use the latest syntax automatically.

+

If you have an older version of IOTstack installed, the syntax used in your Compose file and Dockerfile will depend on when you last ran the menu and manipulated Node-RED.

+

To avoid any uncertainties, you can use a text editor to update your existing Compose file and Dockerfile to adopt the latest syntax.

+

Updating your Compose file

+
    +
  • +

    Step 1: Implement the new syntactic scaffolding:

    +

    The first three lines of the old syntax are:

    +
    1    nodered:
    2      container_name: nodered
    3      build: ./services/nodered/.
    +

    Replace line 3 (the one-line build: directive) with the following lines:

    +
    3      build:
    4        context: ./services/nodered/.
    5        args:
    6        - DOCKERHUB_TAG=latest
    7        - EXTRA_PACKAGES=
    +
  • +
  • +

    Step 2: Pin to the desired version (optional):

    +

    If your existing Dockerfile pins to a specific version, edit the value of DOCKERHUB_TAG (line 6 of your updated Compose file) to use the tag from your Dockerfile. For example, if your existing Dockerfile begins with:

    +
    FROM nodered/node-red:latest-12
    +
    +

    then line 6 of your Compose file should be:

    +
    6        - DOCKERHUB_TAG=latest-12
    +

    Note:

    +
      +
    • IOTstack switched to latest-12 in March 2021. The default for July 2022 syntax is latest. At the time of writing, that is the same as latest-14, which is what is recommended by Node-RED. If any of your flows has a dependence on node.js version 12 (or if you do not want to take the risk), use latest-12.
    • +
    +
  • +
  • +

    Step 3: Define extra packages (optional):

    +

    If your existing Dockerfile includes extra packages, edit the value of EXTRA_PACKAGES (line 7 of your updated Compose file) to list the same packages. For example, if your existing Dockerfile includes:

    +
    RUN apk update && apk add --no-cache eudev-dev mosquitto-clients bind-tools tcpdump
    +
    +

    then everything after eudev-dev should appear on line 7 of your Compose file:

    +
    7        - EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump
    +

    Notes:

    +
      +
    • use spaces between package names.
    • +
    • do not enclose the list of packages in quotes.
    • +
    • do not include eudev-dev (it is specified in the updated Dockerfile).
    • +
    +
  • +
+

Updating your Dockerfile

+

The first four lines of your existing Dockerfile will have a structure similar to this:

+
1  FROM nodered/node-red:latest-12
2  USER root
3  RUN apk update && apk add --no-cache eudev-dev
4  USER node-red
+
+

The actual text will depend on whether you have modified the tag in the first line or added extra packages to the third line.

+
+

Replace the first four lines of your Dockerfile with the following lines:

+
# reference argument - omitted defaults to latest
ARG DOCKERHUB_TAG=latest

# Download base image
FROM nodered/node-red:${DOCKERHUB_TAG}

# reference argument - omitted defaults to null
ARG EXTRA_PACKAGES
ENV EXTRA_PACKAGES=${EXTRA_PACKAGES}

# default user is node-red - need to be root to install packages
USER root

# install packages
RUN apk update && apk add --no-cache eudev-dev ${EXTRA_PACKAGES}

# switch back to default user
USER node-red

# variable not needed inside running container
ENV EXTRA_PACKAGES=

# add-on nodes follow
+

All remaining lines of your original Dockerfile should be left as-is.

+

Applying the new syntax

+

Run the re-building the local Node-RED image commands.

+

Bluetooth support

+

If you enable the node-red-contrib-generic-ble add on node, you will also need to make the following changes to the Node-RED service definition in your docker-compose.yml:

+
    +
  • +

    Add the following mapping to the volumes: clause:

    +
    - /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket
    +
    +
  • +
  • +

    Add the following devices: clause:

    +
    devices:
    +  - "/dev/serial1:/dev/serial1"
    +  - "/dev/vcio:/dev/vcio"
    +  - "/dev/gpiomem:/dev/gpiomem"
    +
    +
  • +
+

Notes:

+
    +
  • These changes are specific to the Raspberry Pi. If you need Bluetooth support on non-Pi hardware, you will need to figure out the details for your chosen platform.
  • +
  • Historically, /dev/ttyAMA0 meant "the serial interface" on Raspberry Pis. Subsequently, it came to mean "the Bluetooth interface" where Bluetooth support was present. Now, /dev/serial1 is used to mean "the Bluetooth interface".
  • +
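If you want to confirm which physical device /dev/serial1 currently points to on your Raspberry Pi, you can list the serial aliases:

$ ls -l /dev/serial*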
diff --git a/Containers/Octoprint/index.html b/Containers/Octoprint/index.html
new file mode 100644
index 000000000..bc424bd49
--- /dev/null
+++ b/Containers/Octoprint/index.html
@@ -0,0 +1,2952 @@

OctoPrint – the snappy web interface for your 3D printer

+

References

+ +

3D Printer device mapping

+

The first time you try to bring up the OctoPrint container, you should expect to see the following error:

+
parsing ~/IOTstack/docker-compose.yml: error while interpolating services.octoprint.devices.[]: required variable OCTOPRINT_DEVICE_PATH is missing a value: eg echo OCTOPRINT_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env
+
+

The message is telling you that you need to define the device path to your 3D Printer.

+

You need to work out how your printer presents itself and define the external device accordingly.

+

option 1 - /dev/ttyUSBn

+

Using "ttyUSBn" will "work" but, because of the inherent variability in the name, this approach is not recommended.

+

The "n" in the "ttyUSBn" can vary depending on which USB devices are attached to your Raspberry Pi and the order in which they are attached. The "n" may also change as you add and remove devices.

+

If the OctoPrint container is up when the device number changes, the container will crash, and it will either go into a restart loop if you try to bring it up when the expected device is not "there", or will try to communicate with a device that isn't your 3D printer.

+

Suppose you choose this method and your 3D printer mounts as /dev/ttyUSB0. You would define your printer like this:

+
$ echo OCTOPRINT_DEVICE_PATH=/dev/ttyUSB0 >>~/IOTstack/.env
+
+

option 2 - /dev/serial/by-id/xxxxxxxx

+

The "xxxxxxxx" is (usually) unique to your 3D printer. To find it, connect your printer to your Raspberry Pi, then run the command:

+
$ ls -1 /dev/serial/by-id
+
+

You will get an answer like this:

+
usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_3b14eaa48a154d5e87032d59459d5206-if00-port0
+
+

Suppose you choose this method and your 3D Printer mounts as shown above. You would define your printer like this:

+
$ echo OCTOPRINT_DEVICE_PATH=/dev/serial/by-id/usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_3b14eaa48a154d5e87032d59459d5206-if00-port0 >>~/IOTstack/.env
+
+

Note:

+
    +
  • If you have multiple serial devices attached, you will get multiple lines in the output. It is up to you to sort out which one belongs to your 3D printer, possibly by disconnecting and re-attaching the printer and observing how the list changes.
  • +
  • The uniqueness of device IDs is under the control of the device manufacturer. Each manufacturer should ensure their devices are unique but some manufacturers are more diligent than others.
  • +
  • device by-id names follow the device. In other words, if you have two or more Raspberry Pis and a collection of serial devices (3D printers, Zigbee adapters, UARTs, and so on), a 3D printer will always get the same by-id name, irrespective of which Raspberry Pi it is attached to.
  • +
  • device by-id names do not persist if the physical device is disconnected. If you switch off your 3D printer or disconnect the USB cable while the OctoPrint container is running, the container will crash.
  • +
+

option 3 - /dev/humanReadableName

+

Suppose your 3D printer is a MasterDisaster5000Pro, and that you would like to be able to set up the device to use a human-readable name like:

+
/dev/MasterDisaster5000Pro
+
+

Start by disconnecting your 3D printer from your Raspberry Pi. Next, run this command:

+
$ tail -f /var/log/messages
+
+

Connect your 3D printer and observe the log output. You are interested in messages that look like this:

+
mmm dd hh:mm:ss mypi kernel: [423839.626522] cp210x 1-1.1.3:1.0: device disconnected
+mmm dd hh:mm:ss mypi kernel: [431265.973308] usb 1-1.1.3: new full-speed USB device number 10 using dwc_otg
+mmm dd hh:mm:ss mypi kernel: [431266.109418] usb 1-1.1.3: New USB device found, idVendor=dead, idProduct=beef, bcdDevice= 1.00
+mmm dd hh:mm:ss mypi kernel: [431266.109439] usb 1-1.1.3: New USB device strings: Mfr=1, Product=2, SerialNumber=3
+mmm dd hh:mm:ss mypi kernel: [431266.109456] usb 1-1.1.3: Product: CP2102N USB to UART Bridge Controller
+mmm dd hh:mm:ss mypi kernel: [431266.109471] usb 1-1.1.3: Manufacturer: Silicon Labs
+mmm dd hh:mm:ss mypi kernel: [431266.109486] usb 1-1.1.3: SerialNumber: cafe80facefeed
+mmm dd hh:mm:ss mypi kernel: [431266.110657] cp210x 1-1.1.3:1.0: cp210x converter detected
+mmm dd hh:mm:ss mypi kernel: [431266.119225] usb 1-1.1.3: cp210x converter now attached to ttyUSB0
+
+

and, in particular, these two lines:

+
… New USB device found, idVendor=dead, idProduct=beef, bcdDevice= 1.00
+… SerialNumber: cafe80facefeed
+
+

Terminate the tail command by pressing Control+C.

+

Use this line as a template:

+
SUBSYSTEM=="tty", ATTRS{idVendor}=="«idVendor»", ATTRS{idProduct}=="«idProduct»", ATTRS{serial}=="«SerialNumber»", SYMLINK+="«sensibleName»"
+
+

Replace the «delimited» values with those you see in the log output. For example, given the above log output, and the desire to associate your 3D printer with the human-readable name of "MasterDisaster5000Pro", the result would be:

+
SUBSYSTEM=="tty", ATTRS{idVendor}=="dead", ATTRS{idProduct}=="beef", ATTRS{serial}=="cafe80facefeed", SYMLINK+="MasterDisaster5000Pro"
+
+

Next, ensure the required file exists by executing the following command:

+
$ sudo touch /etc/udev/rules.d/99-usb-serial.rules
+
+
+

If the file does not exist already, the touch command creates an empty file, owned by root, with mode 644 (rw-r--r--) permissions (all of which are correct).

+
+

Use sudo and your favourite text editor to edit /etc/udev/rules.d/99-usb-serial.rules and insert the "SUBSYSTEM==" line you prepared earlier into that file, then save the file.

+
+

Rules files are read on demand so there is no start or reload command to execute.

+
+

Check your work by disconnecting, then re-connecting your 3D printer, and then run:

+
$ ls /dev
+
+

You should expect to see the human-readable name you chose in the list of devices.
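For example (illustrative output; the date and underlying device will vary), listing the new name should show a symlink pointing at the underlying device:

$ ls -l /dev/MasterDisaster5000Pro
lrwxrwxrwx 1 root root 7 mmm dd hh:mm /dev/MasterDisaster5000Pro -> ttyUSB0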

+

You would then define your printer like this:

+
$ echo OCTOPRINT_DEVICE_PATH=/dev/MasterDisaster5000Pro >>~/IOTstack/.env
+
+

Notes:

+
    +
  • device names follow the device. In other words, if you have two or more Raspberry Pis and a collection of serial devices (3D printers, Zigbee adapters, UARTs, and so on), you can build a single 99-usb-serial.rules file that you install on all of your Raspberry Pis. Then, you can attach a named device to any of your Raspberry Pis and it will always get the same name.
  • +
  • device names do not persist if the physical device is disconnected. If you switch off your 3D printer or disconnect the USB cable while the OctoPrint container is running, the container will crash.
  • +
+

the /dev/video0:/dev/video0 mapping

+

By default, video camera support is disabled. This is because it is unsafe to assume a camera is present on /dev/video0.

+
+

See the Webcams topic of the Octoprint Community Forum for help configuring other kinds of cameras.

+
+

The OctoPrint docker image includes an MJPG streamer. You do not need to run another container with a streamer unless you want to.

+

To activate a Raspberry Pi camera attached via ribbon cable:

  1. Follow the instructions at raspberrypi.org to connect and test the camera. There are guides on YouTube (example) if you need help working out how to insert the ribbon cable.

  2. Confirm the presence of /dev/video0.

  3. Edit docker-compose.yml and uncomment all of the commented-out lines in the following (the fully-uncommented version is shown after this list):

     environment:
       # - ENABLE_MJPG_STREAMER=true
       # - MJPG_STREAMER_INPUT=-r 640x480 -f 10 -y
       # - CAMERA_DEV=/dev/video0

     devices:
       # - /dev/video0:/dev/video0

     Note:

       • The device path on the right hand side of the CAMERA_DEV environment variable corresponds with the right hand side (ie after the colon) of the device mapping. There should be no reason to change either.
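For reference, after removing the comment markers the fragment should look like this:

environment:
  - ENABLE_MJPG_STREAMER=true
  - MJPG_STREAMER_INPUT=-r 640x480 -f 10 -y
  - CAMERA_DEV=/dev/video0

devices:
  - /dev/video0:/dev/video0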

The "640x480" MJPG_STREAMER_INPUT settings will probably result in your camera feed being "letterboxed" but they will get you started. A full list of options is at mjpg-streamer-configuration-options.

+

The typical specs for a baseline Raspberry Pi camera are:

+
    +
  • 1080p 720p 5Mp Webcam
  • +
  • Max resolution: 2592x1944
  • +
  • Max frame rate: VGA 90fps, 1080p 30fps
  • +
  • CODEC: MJPG H.264 AVC
  • +
+

For that type of camera, the following is probably more appropriate:

+
  - MJPG_STREAMER_INPUT=-r 1152x648 -f 10
+
+

The resolution of 1152x648 is 60% of 1080p 1920x1080 and does not cause letterboxing. The resolution and rate of 10 frames per second won't over-tax your communications links, and the camera is MJPEG-capable so it does not need the -y option.

+

Practical usage

+

starting the OctoPrint container

+

To start a print session:

+
    +
  1. Turn the 3D printer on.
  2. +
  3. +

    Bring up the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d octoprint
    +
    +
  4. +
+

If you try to start the OctoPrint container before your 3D printer has been switched on and the USB interface has registered with the Raspberry Pi, the container will go into a restart loop.
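
If you suspect this has happened, the container's status and recent log entries will usually make it obvious (a sketch, assuming the default container name of octoprint):

$ docker ps --filter name=octoprint
$ docker logs --tail 20 octoprint

A STATUS column that keeps cycling through "Restarting" confirms the loop. Terminate the container, switch the printer on, wait a few seconds, then bring the container up again.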

+

first run – the Setup Wizard

+

Use a browser to point to port 9980 on your Raspberry Pi. For example:

+
http://raspberrypi.local:9980
+
+

This will launch the "Setup Wizard".

+
    +
  1. +

    Click the "Next" button until you reach the "Access Control" screen:

    +
      +
    • Define a Username and Password, and keep a record of your decisions.
    • +
    • Click "Create Account".
    • +
    • Ignore the alarming popup alert by clicking "Ignore". This alert is a result of OctoPrint running in a Docker container.
    • +
    • Click "Next".
    • +
    +
  2. +
  3. +

    At the "Online Connectivity Check" screen:

    +
      +
    • Click "Disable Connectivity Check".
    • +
    • Click "Next".
    • +
    +
  4. +
  5. +

    At the "Configure Anonymous Usage Tracking" and "Configure plugin blacklist processing" screens:

    +
      +
    • Make a decision about whether you want the feature enabled or disabled and click the appropriate button.
    • +
    • Click "Next".
    • +
    +
  6. +
  7. +

    At the "Set up your printer profile" screen:

    +
      +
    • It is probably a good idea to visit the tabs and set values appropriate to your printer (build volume, at least).
    • +
    • Click "Next".
    • +
    +
  8. +
  9. +

    At the "Server Commands" screen:

    +
      +
    • +

      Enter the following in the "Restart OctoPrint" field:

      +
      s6-svc -r /var/run/s6/services/octoprint
      +
      +
    • +
    • +

      Click "Next".

      +
    • +
    +
  10. +
  11. +

    At the "Webcam & Timelapse Recordings" screen, and assuming you are configuring a PiCamera:

    +
      +
    • +

      Enter the following in the "Stream URL" field:

      +
      /webcam/?action=stream
      +
      +

      Click the "Test" button to confirm that the camera is working, then click "Close".

      +
    • +
    • +

      Enter the following in the "Snapshot URL" field:

      +
      http://localhost:8080/?action=snapshot
      +
      +

      Click the "Test" button to confirm that the camera is working, then click "Close".

      +
    • +
    • +

      Enter the following in the "Path to FFMPEG" field:

      +
      /usr/bin/ffmpeg
      +
      +

      The expected result is the message "The path is valid".

      +
    • +
    • +

      Click "Next".

      +
    • +
    +
  12. +
  13. +

    Click "Finish" then click the button to reload the user interface.

    +
  14. +
+

after the first run

+

Use a browser to point to port 9980 on your Raspberry Pi. For example:

+
http://raspberrypi.local:9980
+
+

Supply your user credentials and login.

+ +

OctoPrint will display numerous messages in popup windows. These generally fall into two categories:

+
    +
  • Messages that refer to updates; and
  • +
  • Messages that refer to other events.
  • +
+

In general, you can ignore messages about updates. You will get all updates automatically the next time the octoprint-docker image is rebuilt and pushed to DockerHub, and you pull that image and re-create your local container.

+

You can, if you wish, allow an update to proceed. It might be appropriate to do that if you want to test an update. Just be aware that:

+
    +
  1. Updates are ephemeral and will disappear the next time the Octoprint container is created.
  2. +
  3. Updates can change the structure of the persistent storage area in a way which can't be undone, and which may prevent the Octoprint container from starting the next time it is created. In other words, if you want to trial an update, take a backup of OctoPrint's persistent storage area first. A minimal backup sketch appears after this list.
  4. +
+
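
A minimal backup sketch (assuming the default IOTstack paths; the archive name and its date suffix are just examples) might be:

$ cd ~/IOTstack
$ docker-compose stop octoprint
$ sudo tar -czf ~/octoprint-backup-$(date +%Y-%m-%d).tar.gz -C ./volumes octoprint
$ docker-compose start octoprint

This only captures OctoPrint's persistent storage area; it is not a substitute for a full backup of your stack.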

restarting the OctoPrint container

+

You can restart the OctoPrint service in two ways:

+
    +
  • via the Raspberry Pi command line; or
  • +
  • via the OctoPrint user interface.
  • +
+

Whichever method you choose will result in a refresh of the OctoPrint user interface and you will need to follow the prompts to reload your browser page.

+

restarting via the command line

+

Run the following commands:

+
$ cd ~/IOTstack
+$ docker-compose restart octoprint
+
+

restarting via OctoPrint user interface

+

From the "System" icon in the OctoPrint toolbar (looks like a power button symbol):

+
    +
  1. Choose "Restart OctoPrint".
  2. +
+

Note:

+
    +
  • +

    If you do not see the "System" icon in the toolbar, fix it like this:

    +
      +
    1. Click the "Settings" icon (looks like a wrench) in the OctoPrint toolbar.
    2. +
    3. Choose "Server".
    4. +
    5. +

      Enter the following into the "Restart OctoPrint" field:

      +
      s6-svc -r /var/run/s6/services/octoprint
      +
      +
    6. +
    7. +

      Click "Save".

      +
    8. +
    +
  • +
+

stopping the OctoPrint container

+

Unless you intend to leave your printer switched on 24 hours a day, you will also need to be careful when you switch off the printer:

+
    +
  1. +

    Terminate the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose stop octoprint
    +$ docker-compose rm -f octoprint
    +
    +
  2. +
  3. +

    Turn the 3D printer off.

    +
  4. +
+

If you turn the printer off without terminating the container, you will crash the container.

+

Video feed (built-in camera interface)

+

You can view the video feed independently of the OctoPrint web interface like this:

+
http://raspberrypi.local:9980/webcam/?action=stream
+
+

Silencing the security warning

+

OctoPrint assumes it is running "natively" rather than in a container. From a data-communications perspective, OctoPrint (the process running inside the OctoPrint container) sees itself as running on a computer attached to the internal Docker network. When you connect to OctoPrint's web interface from a client device attached to an external network, OctoPrint sees that your source IP address is not on the internal Docker network and it issues a security warning.

+

To silence the warning:

+
    +
  1. +

    Terminate the container if it is running:

    +
    $ cd ~/IOTstack
    +$ docker-compose stop octoprint
    +$ docker-compose rm -f octoprint
    +
    +
  2. +
  3. +

    Use sudo and your favourite text editor to open the following file:

    +
    ~/IOTstack/volumes/octoprint/octoprint/config.yaml
    +
    +
  4. +
  5. +

    Implement the following pattern:

    +
    server:
    +  
    +  ipCheck:
    +    enabled: true
    +    trustedSubnets:
    +    - 203.0.132.0/24
    +
    +

    Notes:

    +
      +
    • The server:, ipCheck: and enabled: directives may already be in place but the trustedSubnets: directive may not be. Add it, and then add your local subnet(s) where you see the "203.0.132.0/24" example.
    • +
    • Remember to use spaces in YAML files. Do not use tabs.
    • +
    +
  6. +
  7. +

    Save the file.

    +
  8. +
  9. +

    Bring up the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d octoprint
    +
    +
  10. +
+

Routine container maintenance

+

You can check for updates like this:

+
$ cd ~/IOTstack
+$ docker-compose pull octoprint
+$ docker-compose up -d octoprint
+$ docker system prune
+
+

If you forget your username and password

+

You can view a list of usernames like this:

+
$ docker exec octoprint octoprint --basedir /octoprint/octoprint user list
+
+

To reset a user's password:

+
    +
  1. +

    Use the following line as a template and replace «username» and «password» with appropriate values:

    +
    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user password --password «password» «username»
    +
    +
  2. +
  3. +

    Execute the edited command. For example, to set the password for user "me" to "verySecure":

    +
    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user password --password verySecure me
    +
    +
  4. +
  5. +

    Restart OctoPrint:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart octoprint
    +
    +
  6. +
+

Note:

+
    +
  • +

    OctoPrint supports more than one username. To explore this further:

    +
    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user --help
    +
    +
  • +
+

If all else fails…

+

If the OctoPrint container seems to be misbehaving, you can get a "clean slate" by running:

+
$ cd ~/IOTstack
+$ docker-compose stop octoprint
+$ docker-compose rm -f octoprint
+$ sudo rm -rf ./volumes/octoprint
+$ docker-compose up -d octoprint
+
+

The OctoPrint container is well-behaved and will re-initialise its persistent storage area correctly. OctoPrint will adopt "first run" behaviour and display the Setup Wizard.

+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/OpenHab/index.html b/Containers/OpenHab/index.html new file mode 100644 index 000000000..58c92502b --- /dev/null +++ b/Containers/OpenHab/index.html @@ -0,0 +1,2236 @@ + + + + + + + + + + + + + + + + + + + + + + + + + openHAB - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

openHAB

+

References

+ +

openHAB runs in "host mode" so there are no port mappings. The default port bindings on IOTstack are:

+
    +
  • 4050 - the HTTP port of the web interface (instead of 8080)
  • +
  • 4051 - the HTTPS port of the web interface (instead of 8443)
  • +
  • 8101 - the SSH port of the Console (since openHAB 2.0.0)
  • +
  • 5007 - the LSP port for validating rules (since openHAB 2.2.0)
  • +
+

If you want to change either of the first two:

+
    +
  1. +

    Edit the openhab fragment in docker-compose.yml:

    +
        - OPENHAB_HTTP_PORT=4050
    +    - OPENHAB_HTTPS_PORT=4051
    +
    +
  2. +
  3. +

    Recreate the openHAB container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d openhab
    +
    +
  4. +
+

There do not appear to be any environment variables to control ports 8101 or 5007 so, if other containers you need to run also depend on those ports, you will have to figure out some way of resolving the conflict.
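
If you need to work out whether, and by what, those ports are already claimed on your Raspberry Pi, something like this will show the listening processes (the port list is just an example):

$ sudo ss -tlnp | grep -E ':(8101|5007)'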

+

Note:

+
    +
  • +

    The original IOTstack documentation included:

    +
    +

    openHAB has been added without Amazon Dashbutton binding.

    +
    +

    but it is not clear if this is still the case.

    +
  • +
  • +

    Amazon Dashbuttons have been discontinued so this may no longer be relevant.

    +
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/PgAdmin4/index.html b/Containers/PgAdmin4/index.html new file mode 100644 index 000000000..a3b0c92c9 --- /dev/null +++ b/Containers/PgAdmin4/index.html @@ -0,0 +1,2360 @@ + + + + + + + + + + + + + + + + + + + + + + + + + pgAdmin4 - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

pgAdmin4

+

References

+ +

About

+

pgAdmin4 is a graphical user interface to PostgreSQL.

+

Configuration

+

Runtime image

+

The service definition includes the following lines:

+
  image: gpongelli/pgadmin4-arm:latest-armv7
+  platform: linux/arm/v7
+# image: gpongelli/pgadmin4-arm:latest-armv8
+
+

The ARMv7 image is enabled by default. This will run on both 32-bit (ARMv7) and 64-bit (ARMv8) systems. The platform clause silences warnings from docker-compose that arise when you try to run an ARMv7 image on ARMv8 architecture.

+

If you are running on a full 64-bit system, you should edit your service definition so that it looks like this:

+
# image: gpongelli/pgadmin4-arm:latest-armv7
+# platform: linux/arm/v7
+  image: gpongelli/pgadmin4-arm:latest-armv8
+
+

Container Time Zone

+

The service definition includes the TZ environment variable. It defaults to Etc/UTC. You can either edit the environment variable directly in your compose file, or provide your own substitute by editing ~/IOTstack/.env. Example:

+
$ cat ~/IOTstack/.env
+TZ=Australia/Sydney
+
+

First run

+

These instructions assume you have selected the postgresql container from the IOTstack menu, and that that container is running.

+

Complete the following steps:

+
    +
  1. +

    Use your web browser to connect to pgAdmin4 on port 5050. For example:

    +
      +
    • http://raspberrypi.local:5050
    • +
    +

    The pgAdmin4 service takes a while to start so please be patient if you have only just launched the container. Once your browser is able to connect to pgAdmin4 successfully, the home screen will be displayed, overlaid with a prompt to enter a master password:

    +

    +
  2. +
  3. +

    Enter a master password.

    +
  4. +
  5. Click "OK" to set the master password. The dialog will disappear leaving the home screen.
  6. +
  7. +

    Click "Add New Server". This displays the server registration sheet:

    +

    +
  8. +
  9. +

    Give the server a name. The name is not important. It just needs to be meaningful to you.

    +
  10. +
  11. +

    Click the "Connection" tab:

    +

    +
  12. +
  13. +

    Enter the name of the PostgreSQL container (ie "postgres").

    +
  14. +
  15. The default port is 5432. This is the internal port number the PostgreSQL container is listening on. It is unlikely that you will need to change this.
  16. +
  17. In the "Maintenance database" field, enter the value of the POSTGRES_DB environment variable as it applies to the PostgreSQL container.
  18. +
  19. In the "Username" field, enter the value of the POSTGRES_USER environment variable as it applies to the PostgreSQL container.
  20. +
  21. In the "Password" field, enter the value of the POSTGRES_PASSWORD environment variable as it applies to the PostgreSQL container.
  22. +
  23. Enable the "Save password" switch if you think that is appropriate.
  24. +
  25. Click the "Save" button.
  26. +
+

Keep in mind that the values of the environment variables you set in steps 9, 10 and 11 only apply the first time you launch the PostgreSQL container. If you change any of these in PostgreSQL, you will have to make matching changes in pgAdmin4.
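
If you can no longer remember the values you gave those variables, you can read them back from the running PostgreSQL container (a sketch, assuming the container is named postgres, as in IOTstack's default service definition):

$ docker exec postgres printenv | grep POSTGRES_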

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Pi-hole/index.html b/Containers/Pi-hole/index.html new file mode 100644 index 000000000..372d719d6 --- /dev/null +++ b/Containers/Pi-hole/index.html @@ -0,0 +1,3001 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Pi-hole - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Pi-hole

+

Pi-hole is a fantastic utility to reduce ads.

+

References

+ +

Environment variables

+

In conjunction with controls in Pi-hole's web GUI, environment variables govern much of Pi-hole's behaviour.

+

If you are running new menu (master branch), environment variables are inline in your compose file. If you are running old menu, the variables will be in:

+
~/IOTstack/services/pihole/pihole.env
+
+
+

There is nothing about old menu which requires the variables to be stored in the pihole.env file. You can migrate everything to docker-compose.yml if you wish.

+
+

Pi-hole's authoritative list of environment variables can be found here. Although many of Pi-hole's options can be set through its web GUI, there are two key advantages to using environment variables:

+
    +
  1. If you ever need to reset Pi-hole by erasing its persistent storage area, configuration options set using environment variables will persist while those set through the GUI may be lost; and
  2. +
  3. On at least two occasions in its history, Pi-hole upgrades have had the effect of wiping configuration options set through the GUI, whereas options set using environment variables survived.
  4. +
+

Admin password

+

By default, Pi-hole does not have an administrator password. That is because the default service definition provided by IOTstack contains the following environment variable with no value on its right hand side:

+
- WEBPASSWORD=
+
+

Each time the Pi-hole container is launched, it checks for the presence or absence of the WEBPASSWORD environment variable, then reacts like this:

+
    +
  • +

    If WEBPASSWORD is defined but does not have a value:

    +
      +
    • No admin password is set;
    • +
    • Any previous admin password is cleared;
    • +
    • You will be able to connect to Pi-hole's web interface without providing a password (you won't even see the login screen); and
    • +
    • The main menu (≡) will not contain a logout command.
    • +
    +

    This is the default situation for IOTstack.

    +
  • +
  • +

    If WEBPASSWORD is defined and has a value, that value will become the admin password. For example, to change your admin password to be "IOtSt4ckP1Hol3":

    +
      +
    1. +

      Edit your compose file so that Pi-hole's service definition contains:

      +
      - WEBPASSWORD=IOtSt4ckP1Hol3
      +
      +
    2. +
    3. +

      Run:

      +
      $ cd ~/IOTstack
      +$ docker-compose up -d pihole
      +
      +

      docker-compose will notice the change to the environment variable and re-create the container. The container will see that WEBPASSWORD has a value and will change the admin password to "IOtSt4ckP1Hol3".

      +

      You will be prompted for a password whenever you connect to Pi-hole's web interface.

      +
    4. +
    +
  • +
  • +

    If WEBPASSWORD is undefined (absent from your compose file), Pi-hole behaves like this:

    +
      +
    • +

      If this is the first time Pi-hole has been launched, a random password is generated.

      +

      Pi-hole senses "first launch" if it has to initialise its persistent storage area. See also getting a clean slate. You can discover the password by running:

      +
      $ docker logs pihole | grep random
      +
      +

      Remember, docker logs are cleared each time a container is terminated or re-created so you need to run that command before the log disappears!

      +
    • +
    • +

      Otherwise, whatever password was set on the previous launch will be re-used.

      +
    • +
    +
  • +
+

about pihole -a -p

+

Some Pi-hole documentation on the web recommends using the following command to change Pi-hole's admin password:

+
$ docker exec pihole pihole -a -p «yourPasswordHere»
+
+

That command works but its effect will always be overridden by WEBPASSWORD. For example, suppose your service definition contains:

+
- WEBPASSWORD=myFirstPassword
+
+

When you start the container, the admin password will be "myFirstPassword". If you run:

+
$ docker exec pihole pihole -a -p mySecondPassword
+
+

then "mySecondPassword" will become the admin password until the next time the container is re-created by docker-compose, at which point the password will be reset to "myFirstPassword".

+

Given this behaviour, we recommend that you ignore the pihole -a -p command.

+

Logging

+

You can control the amount of information Pi-hole retains about your DNS queries using the "Privacy Settings" tab of the "Settings" group. The default is "Show & record everything".

+

If you choose any option except "Anonymous mode", then Pi-hole divides the logging store into two parts:

+
    +
  • Entries which are more recent than 24 hours; and
  • +
  • Entries which are older than 24 hours.
  • +
+

In the "System" tab of the "Settings" group is a Flush logs (last 24 hours) button. Clicking that button erases all log entries which are more recent than 24 hours. The button does not erase entries which are older than 24 hours.

+

Retention of log entries older than 24 hours is controlled by the following environment variable:

+
- FTLCONF_MAXDBDAYS=365
+
+

The default (which applies if the variable is omitted) is to retain log entries for 365 days.

+

Depending on your DNS activity, the database where the log entries are stored can become quite large. Setting this variable to a shorter period will help you control the amount of storage Pi-hole consumes on disk and in your backups.

+

Tip:

+
    +
  • +

    Adding this variable to an existing service definition, or changing the number of days to be less than the previous setting will not reduce the size of the logging database. Although Pi-hole will implement the change, the SQLite database where the logs are written retains the released storage for subsequent re-use. If you want to reclaim that space, run the following command:

    +
    $ sqlite3 ~/IOTstack/volumes/pihole/etc-pihole/pihole-FTL.db "vacuum;"
    +
    +

    The command should not need sudo because pi is the owner by default. There is no need to terminate Pi-hole before running this command (SQLite handles any contention). A quick way to check the effect on the database file's size is shown after this tip.

    +
  • +
+
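
One way to see what difference the vacuum made is to check the size of the database file before and after running it (a sketch, using the same path as the command above):

$ ls -lh ~/IOTstack/volumes/pihole/etc-pihole/pihole-FTL.db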

Recursive resolvers

+

You can control which public DNS servers are used by PiHole when it needs to refer queries to the Internet. You do this by enabling or disabling checkboxes in the "Upstream DNS Servers" panel of the "DNS" tab in the "Settings" group.

+

The default is to use the two Google IPv4 DNS servers which correspond with 8.8.8.8 and 8.8.4.4, respectively.

+

An alternative to toggling checkboxes in the Pi-hole GUI is to use an environment variable:

+
- PIHOLE_DNS_=8.8.8.8;8.8.4.4
+
+
+

The variable does end with an underscore!

+
+

This variable takes a semi-colon-separated list of DNS servers. You can discover the IP address associated with a checkbox by hovering your mouse pointer over the checkbox and waiting for a tool-tip to appear:

+

+

Advanced variables

+
+(advanced) reverse DNS query handling +

First, understand that there are two basic types of DNS query:

+
    +
  • +

    forward queries:

    +
      +
    • question: "what is the IP address of fred.yourdomain.com?"
    • +
    • answer: 192.168.1.100
    • +
    +
  • +
  • +

    reverse queries:

    +
      +
    • question: "what is the domain name for 192.168.1.100?"
    • +
    • answer: fred.yourdomain.com
    • +
    +
  • +
+

Pi-hole has its own built-in DNS server which can answer both kinds of queries. The implementation is useful but doesn't offer all the features of a full-blown DNS server like BIND9. If you decide to implement a more capable DNS server to work alongside Pi-hole, you will need to understand the following Pi-hole environment variables:

+
    +
  • +

    REV_SERVER=

    +

    If you configure Pi-hole's built-in DNS server to be authoritative for your local domain name, REV_SERVER=false is appropriate, in which case none of the variables discussed below has any effect.

    +

    Setting REV_SERVER=true allows Pi-hole to forward queries that it can't answer to a local upstream DNS server, typically running inside your network.

    +
  • +
  • +

    REV_SERVER_DOMAIN=yourdomain.com (where "yourdomain.com" is an example)

    +

    The Pi-hole documentation says:

    +
    +

    "If conditional forwarding is enabled, set the domain of the local network router".

    +
    +

    The words "if conditional forwarding is enabled" mean "when REV_SERVER=true".

    +

    However, this option really has little-to-nothing to do with the "domain of the local network router". Your router may have an IP address that reverse-resolves to a local domain name (eg gateway.mydomain.com) but this is something most routers are unaware of, even if you have configured your router's DHCP server to inform clients that they should assume a default domain of "yourdomain.com".

    +

    This variable actually tells Pi-hole the name of your local domain. In other words, it tells Pi-hole to consider the possibility that an unqualified name like "fred" could be the fully-qualified domain name "fred.yourdomain.com".

    +
  • +
  • +

    REV_SERVER_TARGET=192.168.1.5 (where 192.168.1.5 is an example):

    +

    The Pi-hole documentation says:

    +
    +

    "If conditional forwarding is enabled, set the IP of the local network router".

    +
    +

    This option tells Pi-hole where to direct forward queries that it can't answer. In other words, Pi-hole will send a forward query for fred.yourdomain.com to 192.168.1.5.

    +

    It may be appropriate to set REV_SERVER_TARGET to the IP address of your router (eg 192.168.1.1) but, unless your router is running as a DNS server (not impossible but uncommon), the router will likely just relay any queries to your ISP's DNS servers (or other well-known DNS servers like 8.8.8.8 or 1.1.1.1 if you have configured those). Those external DNS servers are unlikely to be able to resolve queries for names in your private domain, and won't be able to do anything sensible with reverse queries if your home network uses RFC1918 addressing (which most do: 192.168.x.x being the most common example).

    +

    Forwarding doesn't guarantee that 192.168.1.5 will be able to answer the query. The DNS server at 192.168.1.5 may well relay the query to yet another server. In other words, this environment variable does no more than set the next hop.

    +

    If you are planning on using this option, the target needs to be a DNS server that is authoritative for your local domain and that, pretty much, is going to be a local upstream DNS server inside your home network like another Raspberry Pi running BIND9.

    +
  • +
  • +

    REV_SERVER_CIDR=192.168.1.0/24 (where 192.168.1.0/24 is an example)

    +

    The Pi-hole documentation says:

    +
    +

    "If conditional forwarding is enabled, set the reverse DNS zone (e.g. 192.168.0.0/24)".

    +
    +

    This is correct but it lacks detail.

    +

    The string "192.168.1.0/24" defines your local subnet using Classless Inter-Domain Routing (CIDR) notation. Most home subnets use a subnet-mask of 255.255.255.0. If you write that out in binary, it is 24 1-bits followed by 8 0-bits, as in:

    +
       255  .   255  .   255  .   0
    +11111111 11111111 11111111 00000000
    +
    +

    Those 24 one-bits are where the /24 comes from in 192.168.1.0/24. When you perform a bitwise logical AND between that subnet mask and 192.168.1.0, the ".0" is removed (conceptually), as in:

    +
    192.168.1.0 AND 255.255.255.0 = 192.168.1
    +
    +

    What it means is:

    +
      +
    1. The network prefix is "192.168.1".
    2. +
    3. This host on the 192.168.1 network is the reserved address "192.168.1.0". It is better to think of this as "the network prefix followed by all-zero bits in the host portion". It is not common to see the .0 address used in practice. A device either knows its IP address or it doesn't. If it doesn't then it won't know its prefix so it will use 0.0.0.0 as a substitute for "this".
    4. +
    5. The range of IP addresses available for allocation to hosts on this subnet is 192.168.1.1 through 192.168.1.254 inclusive.
    6. +
    7. All hosts on the 192.168.1 network (ie broadcast) is the reserved address "192.168.1.255". It is better to think of this as "the network prefix followed by all-one bits in the host portion".
    8. +
    +

    When you set REV_SERVER_CIDR=192.168.1.0/24 you are telling Pi-hole that reverse queries for the host range 192.168.1.1 through 192.168.1.254 should be sent to the REV_SERVER_TARGET=192.168.1.5.

    +
  • +
+
+

Pi-hole Web GUI

+

Note: in order for the Web GUI settings to have any effect, you need to configure the RPi or other machines to use Pi-hole as their DNS server. This is described in the following topics.

+

Connecting to the GUI

+

Point your browser to:

+
http://«your_ip»:8089/admin
+
+

where «your_ip» can be:

+
    +
  • The IP address of the Raspberry Pi running Pi-hole.
  • +
  • The domain name of the Raspberry Pi running Pi-hole.
  • +
  • The multicast DNS name (eg "raspberrypi.local") of the Raspberry Pi running Pi-hole.
  • +
+

Adding local domain names

+

Login to the Pi-hole web interface: http://raspberrypi.local:8089/admin:

+
    +
  1. Select from Left menu: Local DNS -> DNS Records
  2. +
  3. Enter Domain: raspberrypi.home.arpa and the RPi's IP Address, e.g. 192.168.1.10.
  4. +
  5. Press Add.
  6. +
+

Now you can use raspberrypi.home.arpa as the domain name for the Raspberry Pi in your whole local network. You can also add domain names for your other devices, provided they too have static IPs.

+
+

why .home.arpa?

+

Instead of .home.arpa - which is the real standard, but a mouthful - you can use .internal. Using .local would technically work, but it should be reserved for mDNS use only.

+
+

Configure your Pi to use Pi-hole

+

The Raspberry Pi itself does not have to use the Pi-hole container for its own DNS services. Some chicken-and-egg situations can exist if, for example, the Pi-hole container is down when another process (eg apt or docker-compose) needs to do something that depends on DNS services being available.

+

Nevertheless, if you configure Pi-hole to be your local DNS resolver, then you will probably want to configure your Raspberry Pi to use the Pi-hole container in the first instance, and then fall back to a public DNS server if the container is down. As a beginner, this is probably what you want regardless. Do this by running the commands:

+
$ echo "name_servers=127.0.0.1" | sudo tee -a /etc/resolvconf.conf
+$ echo "name_servers_append=8.8.8.8" | sudo tee -a /etc/resolvconf.conf
+$ echo "resolv_conf_local_only=NO" | sudo tee -a /etc/resolvconf.conf
+$ sudo resolvconf -u
+
+

This results in a configuration that will continue working, even if the Pi-hole container isn't running.

+
+Detailed explanations of these commands +
    +
  1. +

    name_servers=127.0.0.1 instructs the Raspberry Pi to direct DNS queries to the loopback address. Port 53 is implied. If the Pi-hole container is running in:

    +
      +
    • non-host mode, Docker is listening to port 53 and forwards the queries to the Pi-hole container;
    • +
    • host mode, the Pi-hole container is listening to port 53.
    • +
    +
  2. +
  3. +

    name_servers_append=8.8.8.8 instructs the Raspberry Pi to fail-over to 8.8.8.8 if Pi-hole does not respond. You can replace 8.8.8.8 (a Google service) with:

    +
      +
    • Another well-known public DNS server like 1.1.1.1 (Cloudflare).
    • +
    • The IP address(es) of your ISP's DNS hosts (generally available from your ISP's web site).
    • +
    • The IP address of another DNS server running in your local network (eg BIND9).
    • +
    • The IP address of your home router. Most home routers default to the ISP's DNS hosts but you can usually change your router's configuration to bypass your ISP in favour of public servers like 8.8.8.8 and 1.1.1.1.
    • +
    +

    You need slightly different syntax if you want to add multiple fallback servers. For example, suppose your fallback hosts are a local server (eg 192.168.1.2) running BIND9 and 8.8.8.8. The command would be:

    +
    $ echo 'name_servers_append="192.168.1.2 8.8.8.8"' | sudo tee -a /etc/resolvconf.conf
    +
    +
  4. +
  5. +

    resolv_conf_local_only=NO is needed so that 127.0.0.1 and 8.8.8.8 can coexist.

    +
  6. +
  7. The resolvconf -u command instructs Raspberry Pi OS to rebuild the active resolver configuration. In principle, that means parsing /etc/resolvconf.conf to derive /etc/resolv.conf. This command can sometimes return the error "Too few arguments". You should ignore that error.
  8. +
+
flowchart LR
+  RERECONF["/etc/resolvconf.conf"] --- UP([resolvconf -u])
+  DHCP[DHCP provided DNS-server] --- UP
+  UP -- "generates" --> RECONF["/etc/resolv.conf"]
+  classDef command fill:#9996,stroke-width:0px
+  class UP command
+
+
+Advanced options: ignoring DHCP provided DNS-servers, local domain name search +
    +
  • +

    If you wish to prevent the Raspberry Pi from including the address(es) of DNS servers learned from DHCP, you can instruct the DHCP client running on the Raspberry Pi to ignore the information coming from the DHCP server:

    +
    $ echo 'nooption domain_name_servers' | sudo tee -a /etc/dhcpcd.conf
    +$ sudo service dhcpcd reload
    +$ sudo resolvconf -u
    +
    +
  • +
  • +

    If you have followed the steps in Adding local domain names to define names for your local hosts, you can inform the Raspberry Pi of that fact like this:

    +
    $ echo 'search_domains=home.arpa' | sudo tee -a /etc/resolvconf.conf
    +$ sudo resolvconf -u
    +
    +

    That will add the following line to /etc/resolv.conf:

    +
    search home.arpa
    +
    +

    Then, when you refer to a host by a short name (eg "fred") the Raspberry Pi will also consider "fred.home.arpa" when trying to discover the IP address.

    +
  • +
+
+
+Interaction with other containers +

Docker provides a special IP, 127.0.0.11, which listens for DNS queries and resolves them according to the host RPi's resolv.conf. Containers usually rely on this to perform DNS lookups. This is convenient because it avoids surprises: DNS lookups on the host and in the containers will yield the same results.

+

It is possible to make DNS queries directly from one container to another, and this is even supported in some rare use-cases.

+
+

Using Pi-hole as your local DNS

+

To use the Pi-hole in your LAN, you need to assign the Raspberry Pi a fixed IP-address and configure this IP as your DNS server.

+

Fixed IP address for Pi-hole

+

If you want clients on your network to use Pi-hole for their DNS, the Raspberry Pi running Pi-hole must have a fixed IP address. It does not have to be a static IP address (in the sense of being hard-coded into the Raspberry Pi). The Raspberry Pi can still obtain its IP address from DHCP at boot time, providing your DHCP server (usually your home router) always returns the same IP address. This is usually referred to as a static binding and associates the Raspberry Pi's MAC address with a fixed IP address.

+

Keep in mind that many Raspberry Pis have both Ethernet and WiFi interfaces. It is generally prudent to establish static bindings for both network interfaces in your DHCP server.

+

You can use the following command to discover the MAC addresses for your Raspberry Pi's Ethernet and WiFi interfaces:

+
$ for I in eth0 wlan0 ; do ip link show $I ; done
+2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether dc:a6:32:4c:89:f9 brd ff:ff:ff:ff:ff:ff
+3: wlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+    link/ether e5:4f:01:41:88:b2 brd ff:ff:ff:ff:ff:ff
+
+

In the above:

+
    +
  • The MAC address of the Ethernet interface is "dc:a6:32:4c:89:f9"
  • +
  • The MAC address of the WiFi interface is "e5:4f:01:41:88:b2"
  • +
+

If a physical interface does not exist, the command returns "Device does not exist" for that interface. If you prefer, you can also substitute the ifconfig command for ip link show. It's just a little more wordy.

+

Configure clients to use Pi-hole

+

In order for Pi-hole to block ads or resolve anything, clients need to be told to use it as their DNS server. You can either:

+
    +
  1. Adopt a whole-of-network approach and edit the DNS settings in your DHCP server so that all clients are given the IP address of the Raspberry Pi running Pi-hole to use for DNS services when a lease is issued.
  2. +
  3. Adopt a case-by-case (manual) approach where you instruct particular clients to obtain DNS services from the IP address of the Raspberry Pi running Pi-hole.
  4. +
+

Option 1 (whole-of-network) is the simplest approach. Assuming your Raspberry Pi has the static IP 192.168.1.10:

+
    +
  1. +

    Go to your network's DHCP server. In most home networks, this will be your Wireless Access Point/WLAN Router:

    +
      +
    • Login into its web-interface
    • +
    • Find where DNS servers are defined (generally with DHCP controls)
    • +
    • Change all DNS fields to 192.168.1.10
    • +
    +
  2. +
  3. +

    All local clients have to be rebooted. Without this they will continue to use the old DNS setting from an old DHCP lease for quite some time.

    +
  4. +
+

Option 2 (case-by-case) generally involves finding the IP configuration options for each host and setting the DNS server manually. Manual changes are usually effective immediately without needing a reboot.

+
+advanced configurations +

Setting up a combination of Pi-hole (for ad-blocking services), and/or a local upstream DNS resolver (eg BIND9) to be authoritative for a local domain and reverse-resolution for your local IP addresses, and decisions about where each DNS server forwards queries it can't answer (eg your ISP's DNS servers, or Google's 8.8.8.8, or Cloudflare's 1.1.1.1) is a complex topic and depends on your specific needs.

+

The same applies to setting up a DHCP server (eg DHCPD) which is capable of distinguishing between the various clients on your network (ie by MAC address) to make case-by-case decisions as to where each client should obtain its DNS services.

+

If you need help, try asking questions on the IOTstack Discord channel.

+
+

Testing and Troubleshooting

+

Make these assumptions:

+
    +
  1. +

    You have followed the instructions above to add these lines to /etc/resolvconf.conf:

    +
    name_servers=127.0.0.1
    +name_servers_append=8.8.8.8
    +resolv_conf_local_only=NO
    +
    +
  2. +
  3. +

    The Raspberry Pi running Pi-hole has the IP address 192.168.1.10 which it obtains as a static assignment from your DHCP server.

    +
  4. +
  5. You have configured your DHCP server to provide 192.168.1.10 for client devices to use to obtain DNS services (ie, you are saying clients should use Pi-hole for DNS).
  6. +
+

The result of the configuration appears in /etc/resolv.conf:

+
$ cat /etc/resolv.conf
+# Generated by resolvconf
+nameserver 127.0.0.1
+nameserver 192.168.1.10
+nameserver 8.8.8.8
+
+

Interpretation:

+
    +
  • nameserver 127.0.0.1 is present because of name_servers=127.0.0.1
  • +
  • nameserver 192.168.1.10 is present because it was learned from DHCP
  • +
  • nameserver 8.8.8.8 is present because of name_servers_append=8.8.8.8
  • +
+

The fact that the Raspberry Pi is effectively represented twice (once as 127.0.0.1, and again as 192.168.1.10) does not matter. If the Pi-hole container stops running, the Raspberry Pi will bypass 192.168.1.10 and fail over to 8.8.8.8, failing back to 127.0.0.1 when the Pi-hole container starts again.

+

Install dig:

+
$ sudo apt install dnsutils
+
+

Test that Pi-hole is correctly configured (should respond 192.168.1.10):

+
$ dig raspberrypi.home.arpa @192.168.1.10
+
+
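
If you only want the answer, dig's +short option trims the output; with the assumptions above it should print just the IP address:

$ dig +short raspberrypi.home.arpa @192.168.1.10
192.168.1.10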

To test from another machine whether your network's DNS configuration is correct, and whether an ESP will resolve its DNS queries correctly, restart that machine so that it picks up the new DNS settings, and then use:

+
$ dig raspberrypi.home.arpa
+
+

This should produce the same result as the previous command.

+

If this fails to resolve the IP, check that the server in the response is 192.168.1.10. If it's 127.0.0.xx, check that /etc/resolv.conf begins with nameserver 192.168.1.10. If not, check the machine is configured to use DHCP and revisit Pi-hole as DNS.

+

Microcontrollers

+

If you want to avoid hardcoding your Raspberry Pi IP to your ESPhome devices, you need a DNS server that will do the resolving. This can be done using the Pi-hole container as described above.

+

*.local won't work for ESPhome

+

There is a special case for resolving *.local addresses. If you do a ping raspberrypi.local on your desktop Linux or the Raspberry Pi, it will first try using mDNS/bonjour to resolve the IP address raspberrypi.local. If this fails it will then ask the DNS server. ESPhome devices can't use mDNS to resolve an IP address. You need a proper DNS server to respond to queries made by an ESP. As such, dig raspberrypi.local will fail, simulating ESPhome device behavior. This is as intended, and you should use raspberrypi.home.arpa as the address on your ESP-device.

+

Getting a clean slate

+

If Pi-hole misbehaves, you can always try starting from a clean slate by erasing Pi-hole's persistent storage area. Erasing the persistent storage area causes PiHole to re-initialise its data structures on the next launch. You will lose:

+
    +
  1. Any configuration options you may have set via the web GUI that are not otherwise reflected in environment variables.
  2. +
  3. Any whitelist, blacklist or local DNS records you entered.
  4. +
  5. All DNS resolution and blocking history.
  6. +
+

Also note that your administrative password will reset.

+

The recommended approach is:

+
    +
  1. Login to Pi-hole's web GUI and navigate to Settings » Teleporter.
  2. +
  3. Click the Backup button to download a backup.
  4. +
  5. Logout of the Web GUI.
  6. +
  7. +

    Run the following commands:

    +
    $ cd ~/IOTstack
    +$ docker-compose down pihole
    +$ sudo rm -rf ./volumes/pihole
    +$ docker-compose up -d pihole
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  8. +
  9. +

    Login to Pi-hole's web GUI and navigate to Settings » Teleporter.

    +
  10. +
  11. Use the checkboxes to select the settings you wish to restore, and click the Browse and Restore buttons.
  12. +
+

Docker Desktop

+

If you run Pi-hole using Docker Desktop for macOS, all client activity will be logged against the IP address of the default gateway on the internal bridged network.

+

It appears that Docker Desktop for macOS interposes an additional level of Network Address Translation (NAT) between clients and the Pi-hole service. This does not affect Pi-hole's ability to block ads. It just makes the GUI reports a little less useful.

+

It is not known whether this is peculiar to Docker Desktop for macOS or also affects other variants of Docker Desktop.

+

This problem does not affect Pi-hole running in a container on a Raspberry Pi.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Plex/index.html b/Containers/Plex/index.html new file mode 100644 index 000000000..c25df0a50 --- /dev/null +++ b/Containers/Plex/index.html @@ -0,0 +1,2240 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Plex - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Plex

+

References

+ +

Web interface

+

The web UI can be found on "your_ip":32400/web

+

Mounting an external drive by UUID to the home directory

+

official mounting guide

+

Create a directory in your home directory called mnt with a subdirectory HDD. Follow the instructions above to mount your external drive to /home/pi/mnt/HDD via your fstab. Then edit your docker-compose.yml file under plex and uncomment the volumes for TV series and movies (modify the paths to point to your media locations). Run docker-compose up -d to rebuild Plex with the new volumes.
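
As a minimal sketch of the mounting step (the official guide linked above is authoritative; /dev/sda1, the UUID placeholder and the ext4 filesystem type are assumptions you must replace with your own values):

$ mkdir -p /home/pi/mnt/HDD
$ sudo blkid /dev/sda1
$ echo 'UUID=«your-uuid» /home/pi/mnt/HDD ext4 defaults,nofail 0 2' | sudo tee -a /etc/fstab
$ sudo mount -a

The nofail option stops the Raspberry Pi from hanging at boot if the drive happens to be disconnected.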

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Portainer-agent/index.html b/Containers/Portainer-agent/index.html new file mode 100644 index 000000000..7ccf3da69 --- /dev/null +++ b/Containers/Portainer-agent/index.html @@ -0,0 +1,2247 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Portainer agent - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Portainer agent

+

References

+ +

About

+

The Portainer agent is a great way to attach a second Docker instance to an existing Portainer instance. This allows you to manage multiple Docker environments from a single Portainer instance.

+

Adding to an existing instance

+

To add the agent to an existing Portainer instance:

+
    +
  • Go to the Endpoints tab.
  • +
  • Click on Add endpoint
  • +
  • Select Agent
  • +
  • Enter the name of the agent
  • +
  • Enter the URL of the endpoint: ip-of-agent-instance:9001
  • +
  • Click on add endpoint
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Portainer-ce/index.html b/Containers/Portainer-ce/index.html new file mode 100644 index 000000000..34636834a --- /dev/null +++ b/Containers/Portainer-ce/index.html @@ -0,0 +1,2389 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Portainer CE - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Portainer CE

+

References

+ +

Definition

+
    +
  • +

    "#yourip" means any of the following:

    +
      +
    • the IP address of your Raspberry Pi (eg 192.168.1.10)
    • +
    • the multicast domain name of your Raspberry Pi (eg iot-hub.local)
    • +
    • the domain name of your Raspberry Pi (eg iot-hub.mydomain.com)
    • +
    +
  • +
+

About Portainer CE

+

Portainer CE (Community Edition) is an application for managing Docker. It is a successor to Portainer. According to the Portainer CE documentation

+
+

Portainer 1.24.x will continue as a separate code branch, released as portainer/portainer:latest, and will receive ongoing security updates until at least 1st Sept 2021. No new features will be added beyond what was available in 1.24.1.

+
+

From that it should be clear that Portainer is deprecated and that Portainer CE is the way forward.

+

Installing Portainer CE

+

Run the menu:

+
$ cd ~/IOTstack
+$ ./menu.sh
+
+

Choose "Build Stack", select "Portainer-ce", press [TAB] then "\<Ok>" and follow through to the end of the menu process, typically choosing "Do not overwrite" for any existing services. When the menu finishes:

+
$ docker-compose up -d
+
+

Ignore any message like this:

+
+

WARNING: Found orphan containers (portainer) for this project …

+
+

First run of Portainer CE

+

In your web browser navigate to #yourip:9000/:

+
    +
  • the first screen will suggest a username of "admin" and ask for a password. Supply those credentials and click "Create User".
  • +
  • the second screen will ask you to select a connection method. For IOTstack, "Docker (Manage the local Docker environment)" is usually appropriate so click that and then click "Connect".
  • +
+

From there, you can click on the "Local" group and take a look around. One of the things Portainer CE can help you do is find unused containers but beware of reading too much into this because, sometimes, an "unused" container is actually the base for another container (eg Node-RED).

+

There are 'Quick actions' to view logs and other stats. This can all be done from terminal commands but Portainer CE makes it easier.

+

Setting the Public IP address

+

If you click on a "Published Port" in the "Containers" list, your browser may return an error saying something like "can't connect to server" associated with an IP address of "0.0.0.0".

+

To fix that problem, proceed as shown below:

+

Set Public IP address

+
    +
  1. Click "Environments" in the left hand panel.
  2. +
  3. Click the name "local" in the list of Environments.
  4. +
  5. Click in the "Public IP" field. Enter one of the following:
      +
    • The multicast DNS (MDNS) name of your Raspberry Pi (eg iot-hub.local)
    • +
    • The fully-qualified domain name (FQDN) of your Raspberry Pi (eg iot-hub.mydomain.com)
    • +
    • The IP address of your Raspberry Pi (eg 192.168.1.10)
    • +
    +
  6. +
  7. Click "Update environment".
  8. +
+
+

To remove the Public IP address, repeat the above steps but clear the "Public IP" field in step 3.

+
+

The reason why you have to tell Portainer CE which Public IP address to use is because an instance of Portainer CE does not necessarily have to be running on the same Raspberry Pi as the Docker containers it is managing.

+

Keep in mind that clicking on a "Published Port" does not guarantee that your browser can open a connection. For example:

+
    +
  • Port 1883 for Mosquitto expects MQTT packets. It will not respond to HTTP, so any attempt will fail.
  • +
  • Port 8089 for PiHole will respond to HTTP but PiHole may reject or mis-handle your attempt.
  • +
  • Port 1880 for NodeRed will respond normally.
  • +
+
+

All things considered, you will get more consistent behaviour if you simply bookmark the URLs you want to use for your IOTstack services.

+
+

Notes:

+
    +
  • Earlier documentation for Portainer-CE used the term "endpoint" for what is now being called the "environment".
  • +
  • The "environment" being discussed in this section is Portainer-CE's environment. It should not be confused with the tools Portainer-CE provides for managing a container's environment (eg setting environment variables).
  • +
+

If you forget your password

+

If you forget the password you created for Portainer CE, you can recover by doing the following:

+
$ cd ~/IOTstack
+$ docker-compose stop portainer-ce
+$ sudo rm -r ./volumes/portainer-ce
+$ docker-compose start portainer-ce
+
+

Then, follow the steps in:

+
    +
  1. First run of Portainer CE; and
  2. +
  3. Setting the Public IP address.
  4. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/PostgreSQL/index.html b/Containers/PostgreSQL/index.html new file mode 100644 index 000000000..1aa7ae6d9 --- /dev/null +++ b/Containers/PostgreSQL/index.html @@ -0,0 +1,2369 @@ + + + + + + + + + + + + + + + + + + + + + + + + + PostgreSQL - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

PostgreSQL

+

References

+ +

About

+

PostgreSQL is an SQL server, for those that need an SQL database.

+

The database is available on port 5432

+

Configuration

+

The service definition includes the following environment variables:

+
    +
  • TZ your timezone. Defaults to Etc/UTC
  • +
  • POSTGRES_USER. Initial username. Defaults to postuser.
  • +
  • POSTGRES_PASSWORD. Initial password associated with initial username. Defaults to IOtSt4ckpostgresDbPw (postpassword for old menu).
  • +
  • POSTGRES_DB. Initial database. Defaults to postdb.
  • +
+

You can either edit the environment variables directly or provide your own substitutes by editing ~/IOTstack/.env. Example:

+
$ cat ~/IOTstack/.env
+TZ=Australia/Sydney
+POSTGRES_PASSWORD=oneTwoThree
+
+

When the container is brought up:

+
    +
  • TZ will have the value Australia/Sydney (from .env)
  • +
  • POSTGRES_PASSWORD will have the value oneTwoThree (from .env)
  • +
  • POSTGRES_USER will have the value postuser (the default); and
  • +
  • POSTGRES_DB will have the value postdb (the default).
  • +
+

The TZ variable takes effect every time the container is brought up. The other environment variables only work the first time the container is brought up.

+

It is highly recommended to select your own password before you launch the container for the first time. See also Getting a clean slate.

+

Management

+

You can interact with the PostgreSQL Relational Database Management System running in the container via its psql command. You can invoke psql like this:

+
$ docker exec -it postgres bash -c 'PGPASSWORD=$POSTGRES_PASSWORD psql $POSTGRES_DB $POSTGRES_USER'
+
+
+

Because of the single quotes (') surrounding everything after the -c, expansion of the environment variables is deferred until the command is executed inside the container.

+
+
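
You can also run a single statement without staying in the interactive shell. For example (a sketch re-using the same pattern as above), this lists the databases in the instance:

$ docker exec -it postgres bash -c 'PGPASSWORD=$POSTGRES_PASSWORD psql $POSTGRES_DB $POSTGRES_USER -c "\l"'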

You can use any of the following methods to exit psql:

+
    +
  • Type "\q" and press return
  • +
  • Type "exit" and press return
  • +
  • Press control+D
  • +
+

password change

+

Once you have logged into psql you can reset the password like this:

+
# ALTER USER «user» WITH PASSWORD '«password»';
+
+

Replace:

+
    +
  • «user» with the username (eg the default username is postuser)
  • +
  • «password» with your new password.
  • +
+

Notes:

+
    +
  • Changing the password via the ALTER command does not update the value of the POSTGRES_PASSWORD environment variable. You need to do that by hand.
  • +
  • +

    Whenever you make a change to a running container's environment variables, the changes will not take effect until you re-create the container by running:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d postgresql
    +
    +
  • +
+

Getting a clean slate

+

If you need to start over, proceed like this:

+
$ cd ~/IOTstack
+$ docker-compose down postgres
+$ sudo rm -rf ./volumes/postgres
+$ docker-compose up -d postgres
+
+
+

see also if downing a container doesn't work

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Prometheus/index.html b/Containers/Prometheus/index.html new file mode 100644 index 000000000..d3d7c567b --- /dev/null +++ b/Containers/Prometheus/index.html @@ -0,0 +1,2954 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Prometheus - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Prometheus

+

References

+ +

Special note 2022-11-08

+

Issue 620 pointed out there was an error in the default configuration file. That has been fixed. To adopt it, please do the following:

+
    +
  1. +

    If Prometheus and/or any of its associated containers are running, take them down:

    +
    $ cd ~/IOTstack
    +$ docker-compose down prometheus prometheus-cadvisor prometheus-nodeexporter
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  2. +
  3. +

    Move the existing active configuration out of the way:

    +
    $ cd ~/IOTstack/volumes/prometheus/data/config
    +$ mv config.yml config.yml.old
    +
    +
  4. +
  5. +

    Make sure that the service definitions in your docker-compose.yml are up-to-date by comparing them with the template versions:

    +
      +
    • ~/IOTstack/.templates/prometheus/service.yml
    • +
    • ~/IOTstack/.templates/prometheus-cadvisor/service.yml
    • +
    • ~/IOTstack/.templates/prometheus-nodeexporter/service.yml
    • +
    +

    Your service definitions and those in the templates do not need to be identical, but you should be able to explain any differences.

    +
  6. +
  7. +

    Rebuild your Prometheus container by following the instructions in Upgrading Prometheus. Rebuilding will import the updated default configuration into the container's image.

    +
  8. +
  9. +

    Start the service:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d prometheus
    +
    +

    Starting prometheus should start prometheus-cadvisor and prometheus-nodeexporter automatically. Because the old configuration has been moved out of the way, the container will supply a new version as a default.

    +
  10. +
  11. +

    Compare the configurations:

    +
    $ cd ~/IOTstack/volumes/prometheus/data/config
    +$ diff -y config.yml.old config.yml
    +global:                          global:
    +  scrape_interval: 10s             scrape_interval: 10s
    +  evaluation_interval: 10s         evaluation_interval: 10s
    +
    +scrape_configs:                  scrape_configs:
    +  - job_name: "iotstack"           - job_name: "iotstack"
    +    static_configs:                  static_configs:
    +      - targets:                       - targets:
    +        - localhost:9090                 - localhost:9090
    +        - cadvisor:8080        |         - prometheus-cadvisor:8080
    +        - nodeexporter:9100    |         - prometheus-nodeexporter:9100
    +
    +

    In the output above, the vertical bars (|) in the last two lines indicate that those lines have changed. The "old" version is on the left, "new" on the right.

    +

    If you have made other alterations to your config then you should see other change indicators including <, | and >. If so, you should hand-merge your own changes from config.yml.old into config.yml and then restart the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart prometheus
    +
    +
  12. +
+

Overview

+

Prometheus is a collection of three containers:

+
    +
  • Prometheus
  • +
  • CAdvisor
  • +
  • Node Exporter
  • +
+

The default configuration for Prometheus supplied with IOTstack scrapes information from all three containers.

+
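
One way to confirm that scraping is working is to query Prometheus' targets API from the Raspberry Pi running the container. This assumes the default IOTstack port mapping of 9090:

+
$ curl http://localhost:9090/api/v1/targets
+
+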

Installing Prometheus

+

if you are running New Menu …

+

When you select Prometheus in the IOTstack menu, you must also select:

+
    +
  • prometheus-cadvisor; and
  • +
  • prometheus-nodeexporter.
  • +
+

If you do not select all three containers, Prometheus will not start.

+

if you are running Old Menu …

+

When you select Prometheus in the IOTstack menu, the service definition includes the three containers:

+
    +
  • prometheus
  • +
  • prometheus-cadvisor; and
  • +
  • prometheus-nodeexporter.
  • +
+

Significant directories and files

+
~/IOTstack
+├── .templates
+│   └── prometheus
+│       ├── service.yml ❶
+│       ├── Dockerfile ❷
+│       ├── docker-entrypoint.sh ❸
+│       └── iotstack_defaults ❹
+│           └── config.yml
+├── services
+│   └── prometheus
+│       └── service.yml ❺
+├── docker-compose.yml ❻
+└── volumes
+    └── prometheus ❼
+        └── data
+            ├── config ❽
+            │   ├── config.yml
+            │   └── prometheus.yml
+            └── data
+
+
    +
  1. The template service definition.
  2. The Dockerfile used to customise Prometheus for IOTstack.
  3. A pre-launch script to handle container self-repair before launching the Prometheus service.
  4. Defaults for IOTstack, used to initialise on first run, and for container self-repair.
  5. The working service definition (only relevant to old-menu, copied from ❶).
  6. The Compose file (includes ❶).
  7. The persistent storage area.
  8. The configuration directory.
+

How Prometheus gets built for IOTstack

+

Prometheus source code (GitHub)

+

The source code for Prometheus lives at GitHub prometheus/prometheus.

+

Prometheus images (DockerHub)

+

Periodically, the source code is recompiled and the resulting image is pushed to prom/prometheus on DockerHub.

+

IOTstack menu

+

When you select Prometheus in the IOTstack menu, the template service definition is copied into the Compose file.

+
+

Under old menu, it is also copied to the working service definition and then not really used.

+
+

IOTstack first run

+

On a first install of IOTstack, you run the menu, choose Prometheus as one of your containers, and are told to do this:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

docker-compose reads the Compose file. When it arrives at the prometheus fragment, it finds:

+
prometheus:
+  container_name: prometheus
+  build: ./.templates/prometheus/.
+
+

The build statement tells docker-compose to look for:

+
~/IOTstack/.templates/prometheus/Dockerfile
+
+
+

The Dockerfile is in the .templates directory because it is intended to be a common build for all IOTstack users. This is different to the arrangement for Node-RED where the Dockerfile is in the services directory because it is how each individual IOTstack user's version of Node-RED is customised.

+
+

The Dockerfile begins with:

+
FROM prom/prometheus:latest
+
+
+

If you need to pin to a particular version of Prometheus, the Dockerfile is the place to do it. See Prometheus version pinning.

+
+

The FROM statement tells the build process to pull down the base image from DockerHub.

+
+

It is a base image in the sense that it never actually runs as a container on your Raspberry Pi.

+
+

The remaining instructions in the Dockerfile customise the base image to produce a local image. The customisations are:

+
    +
  1. Add configuration defaults appropriate for IOTstack.
  2. +
  3. +

    Add docker-entrypoint.sh which:

    +
      +
    • Ensures the internal directory /prometheus/config/ exists;
    • +
    • Copies any configuration files that have gone missing into that directory.
    • +
    • Enforces "pi:pi" ownership in ~/IOTstack/volumes/prometheus/data/config.
    • +
    • Launches the Prometheus service.
    • +
    +
  4. +
+

The local image is instantiated to become your running container.

+

When you run the docker images command after Prometheus has been built, you may see two rows for Prometheus:

+
$ docker images
+REPOSITORY           TAG         IMAGE ID       CREATED          SIZE
+iotstack_prometheus  latest      1815f63da5f0   23 minutes ago   169MB
+prom/prometheus      latest      3f9575991a6c   3 days ago       169MB
+
+
    +
  • prom/prometheus is the base image; and
  • +
  • iotstack_prometheus is the local image.
  • +
+

You may see the same pattern in Portainer, which reports the base image as "unused". You should not remove the base image, even though it appears to be unused.

+
+

Whether you see one or two rows depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
+

Dependencies: CAdvisor and Node Exporter

+

The CAdvisor and Node Exporter are included in the Prometheus service definition as dependent containers. What that means is that each time you start Prometheus, docker-compose ensures that CAdvisor and Node Exporter are already running, and keeps them running.

+
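
In Compose terms, that relationship is expressed with a depends_on clause. The following is a simplified sketch of the idea, not a copy of the IOTstack template; check your own docker-compose.yml for the authoritative definition:

+
prometheus:
+  depends_on:
+    - prometheus-cadvisor
+    - prometheus-nodeexporter
+
+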

The default configuration for Prometheus assumes CAdvisor and Node Exporter are running and starts scraping information from those targets as soon as it launches.

+

Configuring Prometheus

+

Configuration directory

+

The configuration directory for the IOTstack implementation of Prometheus is at the path:

+
~/IOTstack/volumes/prometheus/data/config
+
+

That directory contains two files:

+
    +
  • config.yml; and
  • +
  • prometheus.yml.
  • +
+

If you delete either file, Prometheus will replace it with a default the next time the container starts. This "self-repair" function is intended to provide reasonable assurance that Prometheus will at least start instead of going into a restart loop.

+

Unless you decide to change it, the config folder and its contents are owned by "pi:pi". This means you can edit the files in the configuration directory without needing the sudo command. Ownership is enforced each time the container restarts.

+

Active configuration file

+

The file named config.yml is the active configuration. This is the file you should edit if you want to make changes. The default structure of the file is:

+
global:
+  scrape_interval: 10s
+  evaluation_interval: 10s
+
+scrape_configs:
+  - job_name: "iotstack"
+    static_configs:
+      - targets:
+        - localhost:9090
+        - prometheus-cadvisor:8080
+        - prometheus-nodeexporter:9100
+
+

To cause a running instance of Prometheus to notice a change to this file:

+
$ cd ~/IOTstack
+$ docker-compose restart prometheus
+$ docker logs prometheus
+
+

Note:

+
    +
  • The YAML parser used by Prometheus seems to be exceptionally sensitive to syntax errors (far less tolerant than docker-compose). For this reason, you should always check the Prometheus log after any configuration change.
  • +
+
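
As an illustration, suppose you want Prometheus to also scrape a Node Exporter running on another machine on your network. The extra target below (192.168.1.50:9100) is a hypothetical example; add a similar line to the targets list in config.yml and then restart the container as shown above:

+
scrape_configs:
+  - job_name: "iotstack"
+    static_configs:
+      - targets:
+        - localhost:9090
+        - prometheus-cadvisor:8080
+        - prometheus-nodeexporter:9100
+        - 192.168.1.50:9100
+
+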

Reference configuration file

+

The file named prometheus.yml is a reference configuration. It is a copy of the original configuration file that ships inside the Prometheus container at the path:

+
/etc/prometheus/prometheus.yml
+
+

Editing prometheus.yml has no effect. It is provided as a convenience to help you follow examples on the web. If you want to make the contents of prometheus.yml the active configuration, you need to do this:

+
$ cd ~/IOTstack/volumes/prometheus/data/config
+$ cp prometheus.yml config.yml
+$ cd ~/IOTstack
+$ docker-compose restart prometheus
+$ docker logs prometheus
+
+

Environment variables

+

The IOTstack implementation of Prometheus supports two environment variables:

+
environment:
+  - IOTSTACK_UID=1000
+  - IOTSTACK_GID=1000
+
+

Those variables control ownership of the Configuration directory and its contents. They are present in the standard IOTstack service definition for Prometheus and have the effect of assigning ownership to "pi:pi".

+

If you delete those environment variables from your Compose file, the Configuration directory will be owned by "nobody:nobody"; otherwise the directory and its contents will be owned by whatever values you pass for those variables.

+
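
If you run IOTstack under an account other than "pi", you can discover appropriate values by running the following commands on your Raspberry Pi (not inside the container):

+
$ id -u
+$ id -g
+
+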

Migration considerations

+

Under the original IOTstack implementation of Prometheus (just "as it comes" from DockerHub), the service definition expected the configuration file to be at:

+
~/IOTstack/services/prometheus/config.yml
+
+

Under this implementation of Prometheus, the configuration file has moved to:

+
~/IOTstack/volumes/prometheus/data/config/config.yml
+
+
+

The change of location is one of the things that allows self-repair to work properly.

+
+

Some of the assumptions behind the default configuration file have changed. In particular, instead of the entire scrape_configs block being commented-out, it is active and defines localhost, prometheus-cadvisor and prometheus-nodeexporter as targets.

+

You should compare the old and new versions and decide which settings need to be migrated into the new configuration file.

+

If you change the configuration file, restart Prometheus and then check the log for errors:

+
$ docker-compose restart prometheus
+$ docker logs prometheus
+
+

Note:

+
    +
  • The YAML parser used by Prometheus is very sensitive to syntax errors. Always check the Prometheus log after any configuration change.
  • +
+

Upgrading Prometheus

+

You can update prometheus-cadvisor and prometheus-nodeexporter like this:

+
$ cd ~/IOTstack
+$ docker-compose pull prometheus-cadvisor prometheus-nodeexporter
+$ docker-compose up -d
+$ docker system prune
+
+

In words:

+
    +
  • docker-compose pull downloads any newer images;
  • +
  • docker-compose up -d causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and
  • +
  • the prune gets rid of the outdated images.
  • +
+

This "simple pull" strategy doesn't work when a Dockerfile is used to build a local image on top of a base image downloaded from DockerHub. The local image is what is running so there is no way for the pull to sense when a newer version becomes available.

+

The only way to know when an update to Prometheus is available is to check the prom/prometheus tags page on DockerHub.

+

Once a new version appears on DockerHub, you can upgrade Prometheus like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull prometheus
+$ docker-compose up -d prometheus
+$ docker system prune
+$ docker system prune
+
+

Breaking it down into parts:

+
    +
  • build causes the named container to be rebuilt;
  • +
  • --no-cache tells the Dockerfile process that it must not take any shortcuts. It really must rebuild the local image;
  • +
  • --pull tells the Dockerfile process to actually check with DockerHub to see if there is a later version of the base image and, if so, to download it before starting the build;
  • +
  • prometheus is the named container argument required by the build command.
  • +
+

Your existing Prometheus container continues to run while the rebuild proceeds. Once the freshly-built local image is ready, the up tells docker-compose to do a new-for-old swap. There is barely any downtime for your service.

+

The prune is the simplest way of cleaning up. The first call removes the old local image. The second call cleans up the old base image.

+
+

Whether an old base image exists depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
+

Prometheus version pinning

+

If you need to pin Prometheus to a particular version:

+
    +
  1. +

    Use your favourite text editor to open the following file:

    +
    ~/IOTstack/.templates/prometheus/Dockerfile
    +
    +
  2. +
  3. +

    Find the line:

    +
    FROM prom/prometheus:latest
    +
    +
  4. +
  5. +

    Replace latest with the version you wish to pin to. For example, to pin to version 2.30.2:

    +
    FROM prom/prometheus:2.30.2
    +
    +
  6. +
  7. +

    Save the file and tell docker-compose to rebuild the local image:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d --build prometheus
    +$ docker system prune
    +
    +

    The new local image is built, then the new container is instantiated based on that image. The prune deletes the old local image.

    +
  8. +
+

Note:

+
    +
  • As well as preventing Docker from updating the base image, pinning will also block incoming updates to the Dockerfile from a git pull. Nothing will change until you decide to remove the pin.
  • +
diff --git a/Containers/Python/index.html b/Containers/Python/index.html
new file mode 100644
index 000000000..822102ae3
--- /dev/null
+++ b/Containers/Python/index.html
@@ -0,0 +1,2905 @@

Python

+

references

+ + +

When you select Python in the menu:

+
    +
  1. +

    The following folder and file structure is created:

    +
    $ tree ~/IOTstack/services/python
    +/home/pi/IOTstack/services/python
    +├── app
    +│   └── app.py
    +├── docker-entrypoint.sh
    +└── Dockerfile
    +
    +

    Note:

    +
      +
    • Under "old menu" (old-menu branch), the service.yml is also copied into the python directory but is then not used.
    • +
    +
  2. +
  3. +

    This service definition is added to your docker-compose.yml:

    +
    python:
    +  container_name: python
    +  build: ./services/python/.
    +  restart: unless-stopped
    +  environment:
    +  - TZ=Etc/UTC
    +  - IOTSTACK_UID=1000
    +  - IOTSTACK_GID=1000
    +# ports:
    +#   - "external:internal"
    +  volumes:
    +  - ./volumes/python/app:/usr/src/app
    +
    +
  4. +
+

customising your Python service definition

+

The service definition contains a number of customisation points:

+
    +
  1. restart: unless-stopped assumes your Python script will run in an infinite loop. If your script is intended to run once and terminate, you should remove this directive.
  2. +
  3. TZ=Etc/UTC should be set to your local time-zone. Never use quote marks on the right hand side of a TZ= variable.
  4. +
  5. +

    If you are running as a different user ID, you may want to change both IOTSTACK_UID and IOTSTACK_GID to appropriate values.

    +

    Notes:

    +
      +
    • Don't use user and group names because these variables are applied inside the container where those names are (probably) undefined.
    • +
    • +

      The only thing these variables affect is the ownership of:

      +
      ~/IOTstack/volumes/python/app
      +
      +

      and its contents. If you want everything to be owned by root, set both of these variables to zero (eg IOTSTACK_UID=0).

      +
    • +
    +
  6. +
  7. +

    If your Python script listens to data-communications traffic, you can set up the port mappings by uncommenting the ports: directive.

    +
  8. +
+

If your Python container is already running when you make a change to its service definition, you can apply it via:

+
$ cd ~/IOTstack
+$ docker-compose up -d python
+
+

Python - first launch

+

After running the menu, you are told to run the commands:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+

This is what happens:

+
    +
  1. docker-compose reads your docker-compose.yml.
  2. +
  3. +

    When it finds the service definition for Python, it encounters:

    +
    build: ./services/python/.
    +
    +

    The leading period means "the directory containing docker-compose.yml", while the trailing period means "Dockerfile", so the path expands to:

    +
    ~/IOTstack/services/python/Dockerfile
    +
    +
  4. +
  5. +

    The Dockerfile is processed. It downloads the base image for Python from Dockerhub and then makes changes including:

    +
      +
    • +

      copying the contents of the following directory into the image as a set of defaults:

      +
      /home/pi/IOTstack/services/python/app
      +
      +
    • +
    • +

      copying the following file into the image:

      +
      /home/pi/IOTstack/services/python/docker-entrypoint.sh
      +
      +

      The docker-entrypoint.sh script runs each time the container launches and performs initialisation and "self repair" functions.

      +
    • +
    +

    The output of the Dockerfile run is a new local image tagged with the name iotstack_python.

    +
  6. +
  7. +

    The iotstack_python image is instantiated to become the running container.

    +
  8. +
  9. +

    When the container starts, the docker-entrypoint.sh script runs and initialises the container's persistent storage area:

    +
    $ tree -pu ~/IOTstack/volumes
    +/home/pi/IOTstack/volumes
    +└── [drwxr-xr-x root    ]  python
    +    └── [drwxr-xr-x pi      ]  app
    +        └── [-rwxr-xr-x pi      ]  app.py
    +
    +

    Note:

    +
      +
    • the top-level python folder is owned by "root" but the app directory and its contents are owned by "pi".
    • +
    +
  10. +
  11. +

    The initial app.py Python script is a "hello world" placeholder. It runs as an infinite loop emitting messages every 10 seconds until terminated. You can see what it is doing by running:

    +
    $ docker logs -f python
    +The world is born. Hello World.
    +The world is re-born. Hello World.
    +The world is re-born. Hello World.
    +
    +
    +

    Pressing control+c terminates the log display but does not terminate the running container.

    +
  12. +
+

stopping the Python service

+

To stop the container from running, either:

+
    +
  • +

    take down your whole stack:

    +
    $ cd ~/IOTstack
    +$ docker-compose down
    +
    +
  • +
  • +

    terminate the python container

    +
    $ cd ~/IOTstack
    +$ docker-compose down python
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  • +
+

starting the Python service

+

To bring up the container again after you have stopped it, either:

+
    +
  • +

    bring up your whole stack:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d
    +
    +
  • +
  • +

    bring up the python container

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d python
    +
    +
  • +
+

Python - second-and-subsequent launch

+

Each time you launch the Python container after the first launch:

+
    +
  1. The existing local image (iotstack_python) is instantiated to become the running container.
  2. +
  3. The docker-entrypoint.sh script runs and performs "self-repair" by replacing any files that have gone missing from the persistent storage area. Self-repair does not overwrite existing files!
  4. +
  5. The app.py Python script is run.
  6. +
+

when things go wrong - check the log

+

If the container misbehaves, the log is your friend:

+
$ docker logs python
+
+

project development life-cycle

+

It is critical that you understand that all of your project development should occur within the folder:

+
~/IOTstack/volumes/python/app
+
+

So long as you are performing some sort of routine backup (either with a supplied script or a third party solution like Paraphraser/IOTstackBackup), your work will be protected.

+

getting started

+

Start by editing the file:

+
~/IOTstack/volumes/python/app/app.py
+
+

If you need other supporting scripts or data files, also add those to the directory:

+
~/IOTstack/volumes/python/app
+
+

Any time you change something in the app folder, tell the running python container to notice the change by:

+
$ cd ~/IOTstack
+$ docker-compose restart python
+
+

reading and writing to disk

+

Consider this line in the service definition:

+
- ./volumes/python/app:/usr/src/app
+
+

The leading period means "the directory containing docker-compose.yml", so it is the same as:

+
- ~/IOTstack/volumes/python/app:/usr/src/app
+
+

Then, you split the line at the ":", resulting in:

+
    +
  • The external directory = ~/IOTstack/volumes/python/app
  • +
  • The internal directory = /usr/src/app
  • +
+

What it means is that:

+
    +
  • Any file you put into the external directory (or any sub-directories you create within the external directory) will be visible to your Python script running inside the container at the same relative position in the internal directory.
  • +
  • Any file or sub-directory created in the internal directory by your Python script running inside the container will be visible outside the container at the same relative position in the external directory.
  • +
  • The contents of external directory and, therefore, the internal directory will persist across container launches.
  • +
+

If your script writes into any other directory inside the container, the data will be lost when the container re-launches.

+
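
You can demonstrate the mapping for yourself. The file name below is just an example; anything created in the external directory is immediately visible inside the container:

+
$ echo "hello" >~/IOTstack/volumes/python/app/demo.txt
+$ docker exec python cat /usr/src/app/demo.txt
+hello
+$ rm ~/IOTstack/volumes/python/app/demo.txt
+
+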

getting a clean slate

+

If you make a mess of things and need to start from a clean slate, erase the persistent storage area:

+
$ cd ~/IOTstack
+$ docker-compose down python
+$ sudo rm -rf ./volumes/python
+$ docker-compose up -d python
+
+
+

see also if downing a container doesn't work

+
+

The container will re-initialise the persistent storage area from its defaults.

+

adding packages

+

As you develop your project, you may find that you need to add supporting packages. For this example, we will assume you want to add "Flask" and "beautifulsoup4".

+

If you were developing a project outside of container-space, you would simply run:

+
$ pip3 install -U Flask beautifulsoup4
+
+

You can do the same thing with the running container:

+
$ docker exec python pip3 install -U Flask beautifulsoup4
+
+

and that will work — until the container is re-launched, at which point the added packages will disappear.

+

To make Flask and beautifulsoup4 a permanent part of your container:

+
    +
  1. +

    Change your working directory:

    +
    $ cd ~/IOTstack/services/python/app
    +
    +
  2. +
  3. +

    Use your favourite text editor to create the file requirements.txt in that directory. Each package you want to add should be on a line by itself:

    +
    Flask
    +beautifulsoup4
    +
    +
  4. +
  5. +

    Tell Docker to rebuild the local Python image:

    +
    $ cd ~/IOTstack
    +$ docker-compose build --force-rm python
    +$ docker-compose up -d --force-recreate python
    +$ docker system prune -f
    +
    +

    Note:

    +
      +
    • You will see a warning about running pip as root - ignore it.
    • +
    +
  6. +
  7. +

    Confirm that the packages have been added:

    +
    $ docker exec python pip3 freeze | grep -e "Flask" -e "beautifulsoup4"
    +beautifulsoup4==4.10.0
    +Flask==2.0.1
    +
    +
  8. +
  9. +

    Continue your development work by returning to getting started.

    +
  10. +
+

Note:

+
    +
  • +

    The first time you follow the process described above to create requirements.txt, a copy will appear at:

    +
    ~/IOTstack/volumes/python/app/requirements.txt
    +
    +

    This copy is the result of the "self-repair" code that runs each time the container starts: it notices that requirements.txt is missing and makes a copy from the defaults stored inside the image.

    +

    If you make more changes to the master version of requirements.txt in the services directory and rebuild the local image, the copy in the volumes directory will not be kept in-sync. That's because the "self-repair" code never overwrites existing files.

    +

    If you want to bring the copy of requirements.txt in the volumes directory up-to-date:

    +
    $ cd ~/IOTstack
    +$ rm ./volumes/python/app/requirements.txt
    +$ docker-compose restart python
    +
    +

    The requirements.txt file will be recreated and it will be a copy of the version in the services directory as of the last image rebuild.

    +
  • +
+

making your own Python script the default

+

Suppose the Python script you have been developing reaches a major milestone and you decide to "freeze dry" your work up to that point so that it becomes the default when you ask for a clean slate. Proceed like this:

+
    +
  1. +

    If you have added any packages by following the steps in adding packages, run the following command:

    +
    $ docker exec python bash -c 'pip3 freeze >requirements.txt'
    +
    +

    That generates a requirements.txt representing the state of play inside the running container. Because it is running inside the container, the requirements.txt created by that command appears outside the container at:

    +
    ~/IOTstack/volumes/python/app/requirements.txt
    +
    +
  2. +
  3. +

    Make your work the default:

    +
    $ cd ~/IOTstack
    +$ cp -r ./volumes/python/app/* ./services/python/app
    +
    +

    The cp command copies:

    +
      +
    • your Python script;
    • +
    • the optional requirements.txt (from step 1); and
    • +
    • any other files you may have put into the Python working directory.
    • +
    +

    Key point:

    +
      +
    • everything copied into ./services/python/app will become part of the new local image.
    • +
    +
  4. +
  5. +

    Terminate the Python container and erase its persistent storage area:

    +
    $ cd ~/IOTstack
    +$ docker-compose down python
    +$ sudo rm -rf ./volumes/python
    +
    +

    Note:

    +
      +
    • +

      If erasing the persistent storage area feels too risky, just move it out of the way:

      +
      $ cd ~/IOTstack/volumes
      +$ sudo mv python python.off
      +
      +
    • +
    +
  6. +
  7. +

    Rebuild the local image:

    +
    $ cd ~/IOTstack
    +$ docker-compose build --force-rm python
    +$ docker-compose up -d --force-recreate python
    +
    +

    On its first launch, the new container will re-populate the persistent storage area but, this time, it will be your Python script and any other supporting files, rather than the original "hello world" script.

    +
  8. +
  9. +

    Clean up by removing the old local image:

    +
    $ docker system prune -f
    +
    +
  10. +
+

canning your project

+

Suppose your project has reached the stage where you wish to put it into production as a service under its own name. Make two further assumptions:

+
    +
  1. You have gone through the steps in making your own Python script the default and you are certain that the content of ./services/python/app correctly captures your project.
  2. +
  3. You want to give your project the name "wishbone".
  4. +
+

Proceed like this:

+
    +
  1. +

    Stop the development project:

    +
    $ cd ~/IOTstack
    +$ docker-compose down python
    +
    +
  2. +
  3. +

    Remove the existing local image:

    +
    $ docker rmi iotstack_python
    +
    +
  4. +
  5. +

    Rename the python services directory to the name of your project:

    +
    $ cd ~/IOTstack/services
    +$ mv python wishbone
    +
    +
  6. +
  7. +

    Edit the python service definition in docker-compose.yml and replace references to python with the name of your project. In the following, the original is on the left, the edited version on the right, and the lines that need to change are indicated with a "|":

    +
    python:                                  |  wishbone:
    +  container_name: python                 |    container_name: wishbone
    +  build: ./services/python/.             |    build: ./services/wishbone/.
    +  restart: unless-stopped                     restart: unless-stopped
    +  environment:                                environment:
    +    - TZ=Etc/UTC                                - TZ=Etc/UTC
    +    - IOTSTACK_UID=1000                         - IOTSTACK_UID=1000
    +    - IOTSTACK_GID=1000                         - IOTSTACK_GID=1000
    +  # ports:                                    # ports:
    +  #   - "external:internal"                   #   - "external:internal"
    +  volumes:                                    volumes:
    +    - ./volumes/python/app:/usr/src/app  |      - ./volumes/wishbone/app:/usr/src/app
    +
    +

    Note:

    +
      +
    • if you make a copy of the python service definition and then perform the required "wishbone" edits on the copy, the python definition will still be active so docker-compose may try to bring up both services. You will eliminate the risk of confusing yourself if you follow these instructions "as written" by not leaving the python service definition in place.
    • +
    +
  8. +
  9. +

    Start the renamed service:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d wishbone
    +
    +
  10. +
+

Remember:

+
    +
  • +

    After you have done this, the persistent storage area will be at the path:

    +
    ~/IOTstack/volumes/wishbone/app
    +
    +
  • +
+

routine maintenance

+

To make sure you are running from the most-recent base image of Python from Dockerhub:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull python
+$ docker-compose up -d python
+$ docker system prune -f
+$ docker system prune -f
+
+

In words:

+
    +
  1. Be in the right directory.
  2. +
  3. Force docker-compose to download the most-recent version of the Python base image from Dockerhub, and then run the Dockerfile to build a new local image.
  4. +
  5. Instantiate the newly-built local image.
  6. +
  7. Remove the old local image.
  8. +
  9. Remove the old base image
  10. +
+

The old base image can't be removed until the old local image has been removed, which is why the prune command needs to be run twice.

+

Note:

+
    +
  • If you have followed the steps in canning your project and your service has a name other than python, just substitute the new name where you see python in the two docker-compose commands.
  • +
diff --git a/Containers/RTL_433-docker/index.html b/Containers/RTL_433-docker/index.html
new file mode 100644
index 000000000..b3de2da93
--- /dev/null
+++ b/Containers/RTL_433-docker/index.html
@@ -0,0 +1,2157 @@

RTL_433 Docker

+

Requirements: you will need an SDR dongle to be able to use RTL_433. I've tested this with an RTL2838.

+

Make sure you can see your receiver by running lsusb

+
$ lsusb
+Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 001 Device 004: ID 0bda:2838 Realtek Semiconductor Corp. RTL2838 DVB-T
+Bus 001 Device 002: ID 2109:3431 VIA Labs, Inc. Hub
+Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+
+

Before starting the container please install RTL_433 from the native installs menu. This will setup your environment with the correct variables and programs. It is also advised to run RTL_433 to verify that it is working correctly on your system.

+

The container is designed to send all detected messages over MQTT.

+

Edit the IOTstack/services/rtl_433/rtl_433.env file with the relevant settings for your MQTT server:

MQTT_ADDRESS=mosquitto
+MQTT_PORT=1883
+#MQTT_USER=myuser
+#MQTT_PASSWORD=mypassword
+MQTT_TOPIC=RTL_433
+

+

The container starts with the command rtl_433 -F mqtt:.... Currently it does not filter any packets; you will need to do this in Node-RED.

diff --git a/Containers/Ring-MQTT/index.html b/Containers/Ring-MQTT/index.html
new file mode 100644
index 000000000..53e32f8f1
--- /dev/null
+++ b/Containers/Ring-MQTT/index.html
@@ -0,0 +1,2404 @@

Ring-MQTT

+

References

+ +

Getting started

+
    +
  1. +

    Be in the correct directory (assumed throughout):

    +
    $ cd ~/IOTstack
    +
    +
  2. +
  3. +

    Run the IOTstack menu and choose ring-mqtt. An alternative to running the menu is to append the service definition template to your compose file like this:

    +
    $ sed -e "s/^/  /" ./.templates/ring-mqtt/service.yml >>docker-compose.yml
    +
    +
    +

    The sed command is required because service definition templates are left-shifted by two spaces.

    +
    +
  4. +
  5. +

    This step is optional. Use a text editor to open your docker-compose.yml file:

    +
      +
    • find the ring-mqtt service definition;
    • +
    • change the TZ environment variable to your time-zone;
    • +
    • save your work.
    • +
    +
  6. +
  7. +

    Bring up the container:

    +
    $ docker-compose up -d ring-mqtt
    +
    +

    This pulls the image from DockerHub, instantiates the container, and initialises its persistent storage.

    +
  8. +
  9. +

    Use sudo and a text editor to open the configuration file at the path. For example:

    +
    $ sudo vi ./volumes/ring-mqtt/data/config.json
    +
    +

    At the time of writing, the default configuration file looked like this:

    +
     1  {
+    2      "mqtt_url": "mqtt://localhost:1883",
+    3      "mqtt_options": "",
+    4      "livestream_user": "",
+    5      "livestream_pass": "",
+    6      "disarm_code": "",
+    7      "enable_cameras": false,
+    8      "enable_modes": false,
+    9      "enable_panic": false,
+   10      "hass_topic": "homeassistant/status",
+   11      "ring_topic": "ring",
+   12      "location_ids": [
+   13          ""
+   14      ]
+   15  }
    +
    +

    From the perspective of any process running in a Docker container, localhost means "this container" rather than "this Raspberry Pi". You need to edit line 2 to point to your MQTT broker:

    +
      +
    • +

      If the ring-mqtt container and your mosquitto container are running on the same Raspberry Pi:

      +
      2      "mqtt_url": "mqtt://mosquitto:1883",
      +
      +
    • +
    • +

      Otherwise, replace localhost with the IP address or domain name of the host where your MQTT broker is running. For example:

      +
      2      "mqtt_url": "mqtt://192.168.0.100:1883",
      +
      +
    • +
    • +

      If your MQTT broker is protected by a username and password, refer to the Ring-MQTT Wiki for the correct syntax.

      +
    • +
    +

    Save your work then restart the container:

    +
    $ docker-compose restart ring-mqtt
    +
    +
  10. +
  11. +

    Launch your browser (eg Chrome, Firefox, Safari) and open the following URL:

    +
    http://«ip-or-name»:55123
    +
    +

    where «ip-or-name» is the IP address or domain name of the Raspberry Pi running your ring-mqtt container. Examples:

    +
      +
    • http://192.168.1.100:55123
    • +
    • http://iot-hub.my.domain.com:55123
    • +
    • http://iot-hub.local:55123
    • +
    +

    You should see the following screen:

    +

    Ring-MQTT web UI

    +

    Follow the instructions on the screen to generate your refresh token.

    +
  12. +
  13. +

    Check the logs:

    +
    $ docker logs ring-mqtt
    +
    +

    Unless you see errors being reported, your ring-mqtt container should be ready.

    +
  14. +
+

Environment variables

+

The default service definition includes two environment variables:

+
environment:
+- TZ=Etc/UTC
+- DEBUG=ring-*
+
+
    +
  • TZ= should be set to your local time zone (explained above).
  • +
  • DEBUG=ring-* ("all debugging options enabled") is the default for ring-mqtt when running in a container. It is included as a placeholder if you want to tailor debugging output. Refer to the Ring-MQTT Wiki.
  • +
+

Whenever you change an environment variable, run:

+
$ cd ~/IOTstack
+$ docker-compose up -d ring-mqtt
+
+

The "up" causes docker-compose to notice the configuration change and re-create the container.

+

Configuration

+

Consult the Ring-MQTT Wiki.

+

Maintenance

+

Periodically:

+
$ cd ~/IOTstack
+$ docker-compose pull ring-mqtt
+
+

If a new image comes down from DockerHub:

+
$ docker-compose up -d ring-mqtt
+$ docker system prune -f
+
+

The "up" instantiates the newly-downloaded image as the running container. The "prune" cleans up the older image.

diff --git a/Containers/Scrypted/index.html b/Containers/Scrypted/index.html
new file mode 100644
index 000000000..ef7c112c8
--- /dev/null
+++ b/Containers/Scrypted/index.html
@@ -0,0 +1,2326 @@

Scrypted – home video integration platform

+

References

+ +

Getting started

+
    +
  1. Run the IOTstack menu and select "Scrypted" so that the service definition is added to your compose file.
  2. +
  3. +

    Before starting the container for the first time, run the following commands:

    +
    $ cd ~/IOTstack
    +$ echo "SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION=$(cat /proc/sys/kernel/random/uuid | md5sum | head -c 24)" >>.env
    +
    +

    This generates a random token and places it in ~/IOTstack/.env.

    +

    Notes:

    +
      +
    1. You only need to do this once.
    2. +
    3. It is not clear whether the token is respected on every launch, or only on first launch.
    4. +
    +
  4. +
  5. +

    Start Scrypted:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d scrypted
    +
    +

    Note:

    +
      +
    • scrypted is a large image (2.5GB). It takes time to download and decompress!
    • +
    +
  6. +
  7. +

    Use the following URL as a template:

    +
    https://«host-or-ip»:10443
    +
    +

    Replace «host-or-ip» with the domain name or IP address of your Raspberry Pi. Examples:

    +
      +
    • https://raspberrypi.my.domain.com:10443
    • +
    • https://raspberrypi.local:10443
    • +
    • https://192.168.1.10:10443
    • +
    +

    Note:

    +
      +
    • You can't use the http protocol. You must use https.
    • +
    +
  8. +
  9. +

    Paste the URL into a browser window. The container uses a self-signed certificate so you will need to accept that using your browser's mechanisms.

    +
  10. +
  11. Enter a username and password to create your administrator account.
  12. +
+

Troubleshooting

+

If you see the message:

+
required variable SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION is missing a value: see instructions for generating a token
+
+

it means that you did not complete step 2 before starting the container. Go back and perform step 2.

+

If you need to start over from scratch:

+
$ cd ~/IOTstack
+$ docker-compose down scrypted
+$ sudo rm -rf ./volumes/scrypted
+$ docker-compose up -d scrypted
+
+
+

see also if downing a container doesn't work

+
+

About the service definition

+

The Scrypted container runs in host mode, which means it binds directly to the Raspberry Pi's ports. The service definition includes:

+
x-ports:
+- "10443:10443"
+
+

The effect of the x- prefix is to comment-out that port mapping. It is included as an aide-memoire to help you remember the port number.

+
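
Because the container runs in host mode, you can confirm that it has claimed its port by checking the host's listening sockets (ss is part of a standard Raspberry Pi OS installation):

+
$ sudo ss -tlnp | grep 10443
+
+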

The service definition also includes the following environment variable:

+
- SCRYPTED_WEBHOOK_UPDATE=http://localhost:10444/v1/update
+
+

The container does not bind to port 10444 so the purpose of this is not clear. The port number should be treated as reserved.

diff --git a/Containers/Syncthing/index.html b/Containers/Syncthing/index.html
new file mode 100644
index 000000000..5a4ff7f81
--- /dev/null
+++ b/Containers/Syncthing/index.html
@@ -0,0 +1,2276 @@

Syncthing

+

Syncthing is a continuous file synchronization program. It synchronizes files between two or more computers in real time, safely protected from prying eyes. Your data is your data alone and you deserve to choose where it is stored, whether it is shared with some third party, and how it's transmitted over the internet.

+

Forget about using proprietary solutions and take control of your data. Syncthing is an open-source solution for synchronizing your data in a peer-to-peer way.

+

References

+ +

Web interface

+

The web UI can be found at «your-ip»:8384.

+

Data & volumes

+

Configuration data is available under the container's /config directory, which is mapped to ./volumes/syncthing/config.

+

The /app directory is inside the container; on the host you will use ./volumes/syncthing/data. The default share is named Sync. Other added folders will also appear under data.

+

Ports

+

Have a look at ~/IOTstack/.templates/syncthing/service.yml or the linuxserver Docker documentation. The ports used are:

+
    ports:
+      - 8384:8384 # Web UI
+      - 22000:22000/tcp # TCP file transfers
+      - 22000:22000/udp # QUIC file transfers
+      - 21027:21027/udp # Receive local discovery broadcasts
+
diff --git a/Containers/TasmoAdmin/index.html b/Containers/TasmoAdmin/index.html
new file mode 100644
index 000000000..3cc5da471
--- /dev/null
+++ b/Containers/TasmoAdmin/index.html
@@ -0,0 +1,2239 @@

TasmoAdmin

+

References

+ +

Web interface

+

The web UI can be found on "your_ip":8088

+

Usage

+

(instructions to follow)

diff --git a/Containers/Telegraf/index.html b/Containers/Telegraf/index.html
new file mode 100644
index 000000000..63431dd69
--- /dev/null
+++ b/Containers/Telegraf/index.html
@@ -0,0 +1,2832 @@

Telegraf

+

This document discusses an IOTstack-specific version of Telegraf built on top of influxdata/influxdata-docker/telegraf using a Dockerfile.

+

The purpose of the Dockerfile is to:

+
    +
  • tailor the default configuration to be IOTstack-ready; and
  • +
  • enable the container to perform self-repair if essential elements of the persistent storage area disappear.
  • +
+

References

+ +

Significant directories and files

+
~/IOTstack
+├── .templates
+│   └── telegraf
+│       ├── Dockerfile ❶
+│       ├── entrypoint.sh ❷
+│       ├── iotstack_defaults
+│       │   ├── additions ❸
+│       │   └── auto_include ❹
+│       └── service.yml ❺
+├── services
+│   └── telegraf
+│       └── service.yml ❻
+├── docker-compose.yml
+└── volumes
+    └── telegraf ❼
+        ├── additions ❽
+        ├── telegraf-reference.conf ➒
+        └── telegraf.conf ➓
+
+
    +
  1. The Dockerfile used to customise Telegraf for IOTstack.
  2. A replacement for the telegraf container script of the same name, extended to handle container self-repair.
  3. The additions folder. See Applying optional additions.
  4. The auto_include folder. Additions automatically applied to telegraf.conf. See Automatic includes to telegraf.conf.
  5. The template service definition.
  6. The working service definition (only relevant to old-menu, copied from ❺).
  7. The persistent storage area for the telegraf container.
  8. A working copy of the additions folder (copied from ❸). See Applying optional additions.
  9. The reference configuration file. See Changing Telegraf's configuration.
  10. The active configuration file. A subset of ➒ altered to support communication with InfluxDB running in a container in the same IOTstack instance.
+

Everything in the persistent storage area ❼:

+
    +
  • will be replaced if it is not present when the container starts; but
  • +
  • will never be overwritten if altered by you.
  • +
+

How Telegraf gets built for IOTstack

+

IOTstack menu

+

When you select Telegraf in the IOTstack menu, the template service definition is copied into the Compose file.

+
+

Under old menu, it is also copied to the working service definition and then not really used.

+
+

IOTstack first run

+

On a first install of IOTstack, you run the menu, choose your containers, and are told to do this:

+
$ cd ~/IOTstack
+$ docker-compose up -d
+
+
+

See also the Migration considerations (below).

+
+

docker-compose reads the Compose file. When it arrives at the telegraf fragment, it finds:

+
  telegraf:
+    container_name: telegraf
+    build: ./.templates/telegraf/.
+    
+
+

The build statement tells docker-compose to look for:

+
~/IOTstack/.templates/telegraf/Dockerfile
+
+
+

The Dockerfile is in the .templates directory because it is intended to be a common build for all IOTstack users. This is different to the arrangement for Node-RED where the Dockerfile is in the services directory because it is how each individual IOTstack user's version of Node-RED is customised.

+
+

The Dockerfile begins with:

+
FROM telegraf:latest
+
+
+

If you need to pin to a particular version of Telegraf, the Dockerfile is the place to do it. See Telegraf version pinning.

+
+

The FROM statement tells the build process to pull down the base image from DockerHub.

+
+

It is a base image in the sense that it never actually runs as a container on your Raspberry Pi.

+
+

The remaining instructions in the Dockerfile customise the base image to produce a local image. The customisations are:

+
    +
  1. Add the rsync package. This helps the container perform self-repair.
  2. +
  3. Copy the default configuration file that comes with the DockerHub image (so it will be available as a fully-commented reference for the user) and make it read-only.
  4. +
  5. Make a working version of the default configuration file from which comment lines and blank lines have been removed.
  6. +
  7. Patch the working version to support communications with InfluxDB running in another container in the same IOTstack instance.
  8. +
  9. +

    Replace entrypoint.sh with a version which:

    +
      +
    • calls rsync to perform self-repair if telegraf.conf goes missing; and
    • +
    • enforces root:root ownership in ~/IOTstack/volumes/telegraf.
    • +
    +
  10. +
+

The local image is instantiated to become your running container.

+

When you run the docker images command after Telegraf has been built, you may see two rows for Telegraf:

+
$ docker images
+REPOSITORY          TAG      IMAGE ID       CREATED       SIZE
+iotstack_telegraf   latest   59861b7fe9ed   2 hours ago   292MB
+telegraf            latest   a721ac170fad   3 days ago    273MB
+
+
    +
  • telegraf is the base image; and
  • +
  • iotstack_telegraf is the local image.
  • +
+

You may see the same pattern in Portainer, which reports the base image as "unused". You should not remove the base image, even though it appears to be unused.

+
+

Whether you see one or two rows depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
+

Migration considerations

+

Under the original IOTstack implementation of Telegraf (just "as it comes" from DockerHub), the service definition expected telegraf.conf to be at:

+
~/IOTstack/services/telegraf/telegraf.conf
+
+

Under this implementation of Telegraf, the configuration file has moved to:

+
~/IOTstack/volumes/telegraf/telegraf.conf
+
+
+

The change of location is one of the things that allows self-repair to work properly.

+
+

With one exception, all prior and current versions of the default configuration file are identical in terms of their semantics.

+
+

In other words, once you strip away comments and blank lines, and remove any "active" configuration options that simply repeat their default setting, you get the same subset of "active" configuration options. The default configuration file supplied with gcgarner/IOTstack is available here if you wish to refer to it.

+
+

The exception is [[inputs.mqtt_consumer]] which is now provided as an optional addition. If your existing Telegraf configuration depends on that input, you will need to apply it. See applying optional additions.

+

Logging

+

You can inspect Telegraf's log by:

+
$ docker logs telegraf
+
+

These logs are ephemeral and will disappear when your Telegraf container is rebuilt.

+

log message: database "telegraf" creation failed

+

The following log message can be misleading:

+
W! [outputs.influxdb] When writing to [http://influxdb:8086]: database "telegraf" creation failed: Post "http://influxdb:8086/query": dial tcp 172.30.0.9:8086: connect: connection refused
+
+

If InfluxDB is not running when Telegraf starts, the depends_on: clause in Telegraf's service definition tells Docker to start InfluxDB (and Mosquitto) before starting Telegraf. Although it can launch the InfluxDB container first, Docker has no way of knowing when the influxd process running inside the InfluxDB container will start listening to port 8086.

+

What this error message usually means is that Telegraf has tried to communicate with InfluxDB before the latter is ready to accept connections. Telegraf typically retries after a short delay and is then able to communicate with InfluxDB.

+
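
You can satisfy yourself that the retry has succeeded by confirming that the telegraf database exists. This uses the same influx CLI as the reset procedure below and assumes InfluxDB 1.8:

+
$ docker exec influxdb influx -execute 'SHOW DATABASES'
+
+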

Changing Telegraf's configuration

+

The first time you launch the Telegraf container, the following structure will be created in the persistent storage area:

+
~/IOTstack/volumes/telegraf
+├── [drwxr-xr-x root    ]  additions
+│   └── [-rw-r--r-- root    ]  inputs.mqtt_consumer.conf
+├── [-rw-r--r-- root    ]  telegraf.conf
+└── [-r--r--r-- root    ]  telegraf-reference.conf
+
+

The file:

+
    +
  • +

    telegraf-reference.conf:

    +
      +
    • is a reference copy of the default configuration file that ships with the base image for Telegraf when it is downloaded from DockerHub. It is nearly 9000 lines long and is mostly comments.
    • +
    • is not used by Telegraf but will be replaced if you delete it.
    • +
    • is marked "read-only" (even for root) as a reminder that it is only for your reference. Any changes you make will be ignored.
    • +
    +
  • +
  • +

    telegraf.conf:

    +
      +
    • is created by removing all comment lines and blank lines from telegraf-reference.conf, leaving only the "active" configuration options, and then adding options necessary for IOTstack.
    • +
    • is less than 30 lines and is significantly easier to understand than telegraf-reference.conf.
    • +
    +
  • +
  • +

    inputs.mqtt_consumer.conf – see Applying optional additions below.

    +
  • +
+

The intention of this structure is that you:

+
    +
  1. search telegraf-reference.conf to find the configuration option you need;
  2. +
  3. read the comments to understand what the option does and how to use it; and then
  4. +
  5. import the option into the correct section of telegraf.conf (see the example after this list).
  6. +
+
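
For example (a minimal illustration only), to start collecting memory statistics you would find the [[inputs.mem]] section in telegraf-reference.conf and import it into telegraf.conf. It has no mandatory options, so the addition can be as simple as:

+
[[inputs.mem]]
+
+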

When you make a change to telegraf.conf, you activate it by restarting the container:

+
$ cd ~/IOTstack
+$ docker-compose restart telegraf
+
+

Automatic includes to telegraf.conf

+
    +
  • +

    inputs.docker.conf instructs Telegraf to collect metrics from Docker. Requires kernel control groups to be enabled to collect memory usage data. If not done during initial installation, enable by running (reboot required):

    +
    $ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt"
    +$ echo $(cat "$CMDLINE") cgroup_memory=1 cgroup_enable=memory | sudo tee "$CMDLINE"
    +
    +
  • +
  • +

    inputs.cpu_temp.conf collects cpu temperature.

    +
  • +
+

Applying optional additions

+

The additions folder (see Significant directories and files) is a mechanism for additional IOTstack-ready configuration options to be provided for Telegraf.

+

Currently there is one addition:

+
    +
  1. inputs.mqtt_consumer.conf which formed part of the gcgarner/IOTstack telegraf configuration and instructs Telegraf to subscribe to a metric feed from the Mosquitto broker. This assumes, of course, that something is publishing those metrics.
  2. +
+

Using inputs.mqtt_consumer.conf as the example, applying that addition to your Telegraf configuration file involves:

+
$ cd ~/IOTstack/volumes/telegraf
+$ grep -v "^#" additions/inputs.mqtt_consumer.conf | sudo tee -a telegraf.conf >/dev/null
+$ cd ~/IOTstack
+$ docker-compose restart telegraf
+
+

The grep strips comment lines and the sudo tee is a safe way of appending the result to telegraf.conf. The restart causes Telegraf to notice the change.

+

Getting a clean slate

+

Erasing the persistent storage area

+

Erasing Telegraf's persistent storage area triggers self-healing and restores known defaults:

+
$ cd ~/IOTstack
+$ docker-compose down telegraf
+$ sudo rm -rf ./volumes/telegraf
+$ docker-compose up -d telegraf
+
+

Notes:

+
    +
  • +

    You can also remove individual files within the persistent storage area and then trigger self-healing. For example, if you decide to edit telegraf-reference.conf and make a mess, you can restore the original version like this:

    +
    $ cd ~/IOTstack
    +$ sudo rm ./volumes/telegraf/telegraf-reference.conf
    +$ docker-compose restart telegraf
    +
    +
  • +
  • +

    See also if downing a container doesn't work

    +
  • +
+

Resetting the InfluxDB database

+

To reset the InfluxDB database that Telegraf writes into, proceed like this:

+
$ cd ~/IOTstack
+$ docker-compose down telegraf
+$ docker exec -it influxdb influx -precision=rfc3339
+> drop database telegraf
+> exit
+$ docker-compose up -d telegraf
+
+

In words:

+
    +
  • Be in the right directory.
  • +
  • Stop the Telegraf container (while leaving the InfluxDB container running). See also if downing a container doesn't work.
  • +
  • Launch the Influx CLI inside the InfluxDB container.
  • +
  • Delete the telegraf database, and then exit the CLI.
  • +
  • Start the Telegraf container. This re-creates the database automatically.
  • +
+

Upgrading Telegraf

+

You can update most containers like this:

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune
+
+

In words:

+
    +
  • docker-compose pull downloads any newer images;
  • +
  • docker-compose up -d causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and
  • +
  • the prune gets rid of the outdated images.
  • +
+

This strategy doesn't work when a Dockerfile is used to build a local image on top of a base image downloaded from DockerHub. The local image is what is running so there is no way for the pull to sense when a newer version becomes available.

+

The only way to know when an update to Telegraf is available is to check the Telegraf tags page on DockerHub.

+

Once a new version appears on DockerHub, you can upgrade Telegraf like this:

+
$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull telegraf
+$ docker-compose up -d telegraf
+$ docker system prune
+$ docker system prune
+
+

Breaking it down into parts:

+
    +
  • build causes the named container to be rebuilt;
  • --no-cache tells the Dockerfile process that it must not take any shortcuts. It really must rebuild the local image;
  • --pull tells the Dockerfile process to actually check with DockerHub to see if there is a later version of the base image and, if so, to download it before starting the build;
  • telegraf is the named container argument required by the build command.
+

Your existing Telegraf container continues to run while the rebuild proceeds. Once the freshly-built local image is ready, the up tells docker-compose to do a new-for-old swap. There is barely any downtime for your service.

+

The prune is the simplest way of cleaning up. The first call removes the old local image. The second call cleans up the old base image. Whether an old base image exists depends on the version of docker-compose you are using and how your version of docker-compose builds local images.

+
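If you are curious about what the prune is going to remove before you run it, you can list the candidate (dangling) images first:

$ docker images --filter "dangling=true"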

Telegraf version pinning

+

If you need to pin Telegraf to a particular version:

+
    +
  1. Use your favourite text editor to open the following file:

     ~/IOTstack/.templates/telegraf/Dockerfile

  2. Find the line:

     FROM telegraf:latest

  3. Replace latest with the version you wish to pin to. For example, to pin to version 1.19.3:

     FROM telegraf:1.19.3

  4. Save the file and tell docker-compose to rebuild the local image:

     $ cd ~/IOTstack
     $ docker-compose up -d --build telegraf
     $ docker system prune

     The new local image is built, then the new container is instantiated based on that image. The prune deletes the old local image.
+

Note:

+
    +
  • As well as preventing Docker from updating the base image, pinning will also block incoming updates to the Dockerfile from a git pull. Nothing will change until you decide to remove the pin.
  • +
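If you later forget whether you have pinned Telegraf, git can remind you, because the pin shows up as a local modification to a tracked file. A minimal check, assuming your clone is at ~/IOTstack:

$ cd ~/IOTstack
$ git status --short .templates/telegraf/Dockerfile
$ git diff .templates/telegraf/Dockerfile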
diff --git a/Containers/Timescaledb/index.html b/Containers/Timescaledb/index.html
new file mode 100644
index 000000000..bd8a2b984
--- /dev/null
+++ b/Containers/Timescaledb/index.html

Timescaledb

+ +

Default port changed

+

In order to avoid a port conflict with PostgreSQL, the public database port is mapped to 5433 using Docker.

+

Cross-container access from other containers still works as before: timescaledb:5432.

diff --git a/Containers/WireGuard/index.html b/Containers/WireGuard/index.html
new file mode 100644
index 000000000..f20846a8d
--- /dev/null
+++ b/Containers/WireGuard/index.html

WireGuard

+

WireGuard is a fast, modern, secure Virtual Private Network (VPN) tunnel. It can securely connect you to your home network, allowing you to access your home network's local services from anywhere. It can also secure your traffic when using public internet connections.

+

Reference:

+ +

Assumptions:

+
    +
  • These instructions assume that you have privileges to configure your network's gateway (router). If you are not able to make changes to your network's firewall settings, then you will not be able to finish this setup.
  • +
  • In common with most VPN technologies, WireGuard assumes that the WAN side of your network's gateway has a public IP address which is reachable directly. WireGuard may not work if that assumption does not hold. If you strike this problem, read ZeroTier vs WireGuard.
  • +
+

Installing WireGuard under IOTstack

+

You increase your chances of a trouble-free installation by performing the installation steps in the following order.

+

Step 1: Update your Raspberry Pi OS

+

To be able to run WireGuard successfully, your Raspberry Pi needs to be fully up-to-date. If you want to understand why, see the read only flag.

+
$ sudo apt update
+$ sudo apt upgrade -y
+
+

Step 2: Set up a Dynamic DNS name

+

Before you can use WireGuard (or any VPN solution), you need a mechanism for your remote clients to reach your home router. You have two choices:

+
    +
  1. Obtain a permanent IP address for your home router from your Internet Service Provider (ISP). Approach your ISP if you wish to pursue this option. It generally involves additional charges.
  2. Use a Dynamic DNS service. See IOTstack documentation Accessing your device from the internet. The rest of this documentation assumes you have chosen this option.
+

Step 3: Understand the Service Definition

+

This is the service definition template that IOTstack uses for WireGuard:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
wireguard:
+  container_name: wireguard
+  image: ghcr.io/linuxserver/wireguard
+  restart: unless-stopped
+  environment:
+    - PUID=1000
+    - PGID=1000
+    - TZ=${TZ:-Etc/UTC}
+    - SERVERURL=your.dynamic.dns.name
+    - SERVERPORT=51820
+    - PEERS=laptop,phone,tablet
+    - PEERDNS=auto
+    - ALLOWEDIPS=0.0.0.0/0
+  ports:
+    - "51820:51820/udp"
+  volumes:
+    - ./volumes/wireguard/config:/config
+    - ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d
+    - ./volumes/wireguard/custom-services.d:/custom-services.d
+  cap_add:
+    - NET_ADMIN
+  sysctls:
+    - net.ipv4.conf.all.src_valid_mark=1
+
+

Unfortunately, that service definition will not work "as is". It needs to be configured.

+

Key points:

+
    +
  • Everything in the environment: section from SERVERURL= down to PEERDNS= (inclusive) affects WireGuard's generated configurations (the QR codes). In other words, any time you change any of those values, any existing QR codes will stop working.
  • +
+

Step 4: Decide what to configure

+

With most containers, you can continue to tweak environment variables and settings without upsetting the container's basic behaviour. WireGuard is a little different. You really need to think, carefully, about how you want to configure the service before you start. If you change your mind later, you generally have to start from a clean slate.

+

Fields that you should always configure

+
    +
  • +

    SERVERURL= should be set to the domain name you have registered with a Dynamic DNS service provider. Example:

    +
    - SERVERURL=downunda.duckdns.org
    +
    +
  • +
  • +

    PEERS= should be a comma-separated list of your client devices (all the phones, tablets, laptops, desktops you want to use remotely to get back into your home network). Example:

    +
    - PEERS=jillMacbook,jackChromebook,alexNokiaG10
    +
    +

    Notes:

    +
      +
    • Many examples on the web use "PEERS=n" where "n" is a number. In practice, that approach seems to be a little fragile and is not recommended for IOTstack.
    • +
    • Each name needs to start with a letter and be followed by one or more letters and/or digits. Letters can be upper- or lower-case. Do not use any other characters.
    • +
    +
  • +
+

Optional configuration - DNS resolution for peers

+

You have several options for how your remote peers resolve DNS requests:

+
    +
  • +

    PEERDNS=auto

    +

    DNS queries made on connected WireGuard clients should work as if they were made on the host. If you configure your ad-blocker in the host's resolvconf.conf, WireGuard clients will also automatically use it.

    +

    Details:

    +
      +
    • The default value of auto instructs the WireGuard service running within the WireGuard container to use a DNS-service, coredns, also running in the Wireguard container. Coredns by default directs queries to 127.0.0.11, which Docker intercepts and forwards to whichever resolvers are specified in the Raspberry Pi's /etc/resolv.conf.
    • +
    +
  • +
  • +

    PEERDNS=auto with custom-cont-init

    +

    This configuration instructs WireGuard to forward DNS queries from remote peers to any host daemon or container which is listening on port 53. This is the option you will want to choose if you are running an ad-blocking DNS server (eg PiHole or AdGuardHome) in a container on the same host as WireGuard, and you want your remote clients to obtain DNS resolution via the ad-blocker, but don't want your Raspberry Pi host to use it.

    +
    +

    Acknowledgement: thanks to @ukkopahis for developing this option.

    +
    +

    To activate this feature:

    +
      +
    1. Make sure your WireGuard service definition contains PEERDNS=auto.
    2. +
    3. +

      Start the WireGuard container by executing:

      +
      $ cd ~/IOTstack
      +$ docker-compose up -d wireguard
      +
      +

      This ensures that the ~/IOTstack/volumes/wireguard folder structure is created and remote client configurations are (re)generated properly.

      +
    4. +
    5. +

      Run the following commands:

      +
      $ cd ~/IOTstack
      +$ sudo cp ./.templates/wireguard/use-container-dns.sh ./volumes/wireguard/custom-cont-init.d/
      +$ docker-compose restart wireguard
      +
      +

      The presence of use-container-dns.sh causes WireGuard to redirect incoming DNS queries to the default gateway on the internal bridged network. That, in turn, results in the queries being forwarded to any other container that is listening for DNS traffic on port 53. It does not matter if that other container is PiHole, AdGuardHome, bind9 or any other kind of DNS server.

      +

      Do note, however, that this configuration creates a dependency between WireGuard and the container providing DNS resolution. You may wish to make that explicit in your docker-compose.yml by adding these lines to your WireGuard service definition:

      +
      depends_on:
      +  - pihole
      +
      +
      +

      Substitute adguardhome or bind9 for pihole, as appropriate.

      +
      +
    6. +
    +

    Once activated, this feature will remain active until you decide to deactivate it. If you ever wish to deactivate it, run the following commands:

    +
    $ cd ~/IOTstack
    +$ sudo rm ./volumes/wireguard/custom-cont-init.d/use-container-dns.sh
    +$ docker-compose restart wireguard
    +
    +
  • +
  • +

    PEERDNS=«ip address»

    +

    A third possibility is if you have a local upstream DNS server. You can specify the IP address of that server so that remote peers receive DNS resolution from that host. For example:

    +
    - PEERDNS=192.168.203.65
    +
    +

    Do note that changes to PEERDNS will not be updated to existing clients, and as such you may want to use PEERDNS=auto unless you have a very specific requirement.

    +
  • +
+

Optional configuration - WireGuard ports

+

The WireGuard service definition template follows the convention of using UDP port "51820" in three places. You can leave it like that and it will just work. There is no reason to change the defaults unless you want to.

+

To understand what each port number does, it is better to think of them like this:

+
environment:
+- SERVERPORT=«public»
+ports:
+- "«external»:«internal»/udp"
+
+

These definitions are going to be used throughout this documentation:

+
    +
  • +

    The «public» port is the port number that your remote WireGuard clients (phone, laptop etc) will try to reach. This is the port number that your router needs to expose to the outside world.

    +
  • +
  • +

    The «external» port is the port number that Docker, running on your Raspberry Pi, will be listening on. Your router needs to forward WireGuard incoming traffic to the «external» port on your Raspberry Pi.

    +
  • +
  • +

    The «internal» port is the port number that WireGuard (the server process) will be listening on inside the WireGuard container. Docker handles forwarding between the «external» and «internal» port.

    +
  • +
+

Rule #1:

+
    +
  • You can change the «public» and «external» ports but you can't change the «internal» port unless you are prepared to do a lot more work.
  • +
+

Rule #2:

+
    +
  • The «public» port forms part of the QR codes. If you decide to change the «public» port after you generate the QR codes, you will have to start over from a clean slate.
  • +
+

Rule #3:

+
    +
  • Your router needs to know about both the «public» and «external» ports so, if you decide to change either of those, you must also reconfigure your router.
  • +
+

See Understanding WireGuard's port numbers if you want more information on how the various port numbers are used.

+

Step 5: Configure WireGuard

+

There are two approaches:

+
    +
  1. Let the menu generate a docker-compose.yml with the default WireGuard service definition template, and then edit docker-compose.yml.
  2. Prepare a compose-override.yml file, then run the menu and have it perform the substitutions for you.
+

Of the two, the first is generally the simpler and means you don't have to re-run the menu whenever you want to change WireGuard's configuration.

+

Method 1: Configure WireGuard by editing docker-compose.yml

+
    +
  1. Run the menu:

     $ cd ~/IOTstack
     $ ./menu.sh

  2. Choose the "Build Stack" option.
  3. If WireGuard is not already selected, select it.
  4. Press enter to begin the build.
  5. Choose Exit.
  6. Open docker-compose.yml in your favourite text editor.
  7. Navigate to the WireGuard service definition.
  8. Implement the decisions you took in decide what to configure.
  9. Save your work.
+

Method 2: Configure WireGuard using compose-override.yml

+

The Custom services and overriding default settings for IOTstack page describes how to use an override file to allow the menu to incorporate your custom configurations into the final docker-compose.yml file.

+

You will need to create the compose-override.yml before running the menu to build your stack. If you have already built your stack, you'll have to rebuild it after creating compose-override.yml.

+
    +
  1. +

    Use your favourite text editor to create (or open) the override file. The file is expected to be at the path:

    +
    ~/IOTstack/compose-override.yml
    +
    +
  2. +
  3. +

    Define overrides to implement the decisions you took in Decide what to configure. For example:

    +
     1
    + 2
    + 3
    + 4
    + 5
    + 6
    + 7
    + 8
    + 9
    +10
    +11
    services:
    +  wireguard:
    +    environment:
    +    - PUID=1000
    +    - PGID=1000
    +    - TZ=${TZ:-Etc/UTC}
    +    - SERVERURL=downunda.duckdns.org
    +    - SERVERPORT=51820
    +    - PEERS=laptop,phone,tablet
    +    - PEERDNS=auto
    +    - ALLOWEDIPS=0.0.0.0/0
    +
    +

    Key points:

    +
      +
    • The override file works at the section level. Therefore, you have to include all of the environment variables from the template, not just the ones you want to alter.
    • +
    • If your override file contains configurations for other containers, make sure the file only has a single services: directive at the start.
    • +
    +
  4. +
  5. +

    Save your work.

    +
  6. +
  7. +

    Run the menu:

    +
    $ cd ~/IOTstack
    +$ ./menu.sh
    +
    +
  8. +
  9. +

    Choose the "Build Stack" option.

    +
  10. +
  11. If WireGuard is not already selected, select it.
  12. +
  13. Press enter to begin the build.
  14. +
  15. Choose Exit.
  16. +
  17. +

    Check your work by running:

    +
    $ cat docker-compose.yml
    +
    +

    and verify that the wireguard service definition is as you expect.

    +
  18. +
+

Step 6: Start WireGuard

+
    +
  1. +

    To start WireGuard, bring up your stack:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d
    +
    +
  2. +
  3. +

    Confirm that WireGuard has started properly by running:

    +
    $ docker ps --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}" --filter name=wireguard
    +
    +

    Repeat the command a few times with a short delay in between. You are looking for signs that the WireGuard container is restarting. If the container seems to be restarting then this command is your friend:

    +
    $ docker logs wireguard
    +
    +

    See also discussion of the read-only flag.

    +
  4. +
  5. +

    Confirm that WireGuard has generated the expected configurations. For example, given the following setting in docker-compose.yml:

    +
    - PEERS=jillMacbook,jackChromebook,alexNokiaG10
    +
    +

    you would expect a result something like this:

    +
    $ tree ./volumes/wireguard/config
    +./volumes/wireguard/config
    +├── coredns
    +│   └── Corefile
    +├── peer_alexNokiaG10
    +│   ├── peer_alexNokiaG10.conf
    +│   ├── peer_alexNokiaG10.png
    +│   ├── presharedkey-peer_alexNokiaG10
    +│   ├── privatekey-peer_alexNokiaG10
    +│   └── publickey-peer_alexNokiaG10
    +├── peer_jackChromebook
    +│   ├── peer_jackChromebook.conf
    +│   ├── peer_jackChromebook.png
    +│   ├── presharedkey-peer_jackChromebook
    +│   ├── privatekey-peer_jackChromebook
    +│   └── publickey-peer_jackChromebook
    +├── peer_jillMacbook
    +│   ├── peer_jillMacbook.conf
    +│   ├── peer_jillMacbook.png
    +│   ├── presharedkey-peer_jillMacbook
    +│   ├── privatekey-peer_jillMacbook
    +│   └── publickey-peer_jillMacbook
    +├── server
    +│   ├── privatekey-server
    +│   └── publickey-server
    +├── templates
    +│   ├── peer.conf
    +│   └── server.conf
    +└── wg0.conf
    +
    +

    Notice how each element in the PEERS= list is represented by a sub-directory prefixed with peer_. You should expect the same pattern for your peers.

    +
  6. +
+

Step 7: Save your WireGuard client configuration files (QR codes)

+

The first time you launch WireGuard, it generates cryptographically protected configurations for your remote clients and encapsulates those configurations in QR codes. You can see the QR codes by running:

+
$ docker logs wireguard
+
+

WireGuard's log is ephemeral, which means it resets each time the container is re-created. In other words, you can't rely on going back to the log to obtain your QR codes if you lose them.

+

WireGuard also records the QR codes as .png files. In fact, the QR codes shown by docker logs wireguard are just side-effects of the .png files as they are created.

+
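If you lose a QR code, you do not necessarily need to start over. Recent releases of the linuxserver image are understood to include a helper script that re-draws the QR code for a named peer in the terminal; treat this as an assumption and fall back to the .png files if the script is not present in your build (replace laptop with one of your PEERS names):

$ docker exec -it wireguard /app/show-peer laptop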

If your Raspberry Pi has a GUI (such as a screen attached to an HDMI port or a VNC connection), you can always retrieve the QR codes by opening the .png files in the GUI.

+

If, however, your Raspberry Pi is running headless, you will need to copy the .png files to a system that is capable of displaying them, such as a Mac or PC. You can use SCP to do that.

+
+

See ssh tutorial if you need help setting up SSH (of which SCP is a part).

+
+

For example, to copy all PNG files from your Raspberry Pi to a target system:

+
$ find ~/IOTstack/volumes/wireguard/config -name "*.png" -exec scp {} user@hostorip:. \;
+
+

Note:

+
    +
  • hostorip is the host name, fully-qualified domain name, multicast domain name or IP address of the GUI-capable target computer; and
  • +
  • user is a valid username on the target computer.
  • +
+

If you want to work in the other direction (ie from the GUI-capable system), you can try:

+
$ scp pi@hostorip:IOTstack/volumes/wireguard/peer_jill-macbook/peer_jill-macbook.png .
+
+

In this case:

+
    +
  • hostorip is the host name, fully-qualified domain name, multicast domain name or IP address of the Raspberry Pi that is running WireGuard.
  • +
+

Keep in mind that each QR code contains everything needed for any device to access your home network via WireGuard. Treat your .png files as "sensitive documents".

+
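On the machine you copied them to, it is worth tightening the permissions so that only your own account can read the files. A minimal example, run in the directory you copied them into:

$ chmod 600 peer_*.png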

Step 8: Configure your router with a NAT rule

+

A typical home network will have a firewall that effectively blocks all incoming attempts from the Internet to open a new connection with a device on your network.

+

To use a VPN from outside of your home network (which is precisely the point of running the service!), you need to configure your router to allow incoming WireGuard traffic to reach the Raspberry Pi running WireGuard. These instructions assume you have the privileges to do that.

+

If you have not used your router's administrative interface before, the default login credentials may be physically printed on the device or in its instruction manual.

+
+

If you have never changed the default login credentials, you should take the time to do that.

+
+

Routers have wildly different user interfaces but the concepts will be the same. This section describes the basic technique but if you are unsure how to do this on your particular router model, the best idea would be to search the web for:

+
    +
  • "[YOUR DEVICE NAME] port forwarding configuration"; or
  • +
  • "[YOUR DEVICE NAME] NAT configuration"
  • +
+

A typical configuration process goes something like this:

+
    +
  1. The router sub-process you need to configure is called Network Address Translation (NAT) but it's not unheard of for this functionality to be grouped with FireWall.
  2. +
  3. +

    The NAT component you are looking for probably has a name like "Port Redirection", "Port Forwarding", "NAT Forwarding" or "NAT Virtual Server".

    +
      +
    • It might also be under "Open Ports" but those are usually one-to-one mappings (ie incomingPort=outgoingPort), apply to port ranges, and are intended to target a single DMZ host.
    • +
    +
  4. +
  5. +

    The configuration screen will contain at least the following fields:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldValue
    Interfacerouter's WAN interface
    Private IPx.x.x.x
    Private Port«external»
    ProtocolUDP
    Public Port«public»
    Service NameWireGuard
    +

    The fields in the above list are in alphabetical order. They will almost certainly be in a different order in your router and may also have different names:

    +
      +
    • Interface is typically a popup menu. Generally it will either default to the name of the physical port on your router that connects to the outside world, or be some other sensible default like "All".
    • +
    • Private IP (or Internal IP) is the IP address of the Raspberry Pi running WireGuard. Note that this pretty much forces you to give your Raspberry Pi a statically-configured IP address (either a static binding in your DHCP server or a hard-coded address in the Raspberry Pi itself).
    • +
    • +

      Private Port (or Internal Port) needs to be the value you chose for «external» in the WireGuard service definition (51820 if you didn't change it).

      +
      +

      Yes, this does sound counterintuitive but it's a matter of perspective. From the router's perspective, the port is on the private or internal part of your home network. From Docker's perspective, the port is «external» to container-space.

      +
      +
    • +
    • +

      Protocol will usually default to "TCP" but you must change it to "UDP".

      +
    • +
    • Public Port or External Port needs to be the value you chose for «public» in the WireGuard service definition (51820 if you didn't change it).
    • +
    • Service Name (or Service Type) is typically a text field, an editable menu (where you can either make a choice or type your own value), or a button approximating an editable menu. If you are given the option of choosing "WireGuard", do that, otherwise just type that name into the field. It has no significance other than reminding you what the rule is for.
    • +
    +
  6. +
+

Step 9: Configure your remote WireGuard clients

+

This is a massive topic and one which is well beyond the scope of this guide. You really will have to work it out for yourself. Start by Googling:

+
    +
  • "[YOUR DEVICE NAME] install WireGuard client".
  • +
+

You will find the list of client software at WireGuard Installation.

+

For portable devices (eg iOS and Android) it usually boils down to:

+
    +
  1. Install the app on your portable device.
  2. Display the QR code the WireGuard server generated for the device.
  3. Launch the app.
  4. Point the device's camera at the QR code.
  5. Follow your nose.
+

Understanding WireGuard's port numbers

+

Here's a concrete example configuration using three different port numbers:

+
environment:
+- SERVERURL=downunda.duckdns.org
+- SERVERPORT=51620
+ports:
+- "51720:51820/udp"
+
+

In other words:

+
    +
  1. The «public» port is 51620.
  2. +
  3. The «external» port is 51720.
  4. +
  5. The «internal» port is 51820.
  6. +
+

You also need to make a few assumptions:

+
    +
  1. The host running the remote WireGuard client (eg a mobile phone with the WireGuard app installed) has been allocated the IP address 55.66.77.88 when it connected to the Internet over 3G/4G/5G.
  2. When the remote WireGuard client initiated the session, it chose UDP port 44524 as its source port. The actual number chosen is (essentially) random and only significant to the client.
  3. Your Internet Service Provider allocated the IP address 12.13.14.15 to the WAN side of your router.
  4. You have done all the steps in Set up a Dynamic DNS name and your WAN IP address (12.13.14.15) is being propagated to your Dynamic DNS service provider.
+

Here's a reference model to help explain what occurs:

+

WireGuard port model

+

The remote WireGuard client:

+
    +
  1. Obtains the Dynamic DNS domain name ("downunda.duckdns.org") and the «public» UDP port (51620) from the configuration contained within the QR code. Recall that those values come from the SERVERURL= and SERVERPORT= environment variables in docker-compose.yml.
  2. Executes a DNS query for the domain name "downunda.duckdns.org" to obtain the WAN IP address (12.13.14.15) of your home router.
  3. Addresses outgoing packets to 12.13.14.15:51620.
+

You configure a NAT port-forwarding rule in your router which accepts incoming traffic on the «public» UDP port (51620) and uses Network Address Translation to change the destination IP address to the Raspberry Pi and destination port to the «external» UDP port (51720). In other words, each incoming packet is readdressed to 192.168.203.60:51720.

+

Docker is listening to the Raspberry Pi's «external» UDP port 51720. Docker uses Network Address Translation to change the destination IP address to the WireGuard container and destination port to the «internal» UDP port (51820). In other words, each incoming packet is readdressed to 172.18.0.6:51820.

+

The packet is then routed to the internal bridged network, and delivered to the WireGuard server process running in the container which is listening on the «internal» UDP port (51820).

+
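You can see Docker's side of this translation (the «external» to «internal» mapping) on the Raspberry Pi at any time. Using the example numbers above, the output should look something like the following (newer versions of Docker may print an additional IPv6 line):

$ docker port wireguard
51820/udp -> 0.0.0.0:51720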

A reciprocal process occurs when the WireGuard server process sends packets back to the remote WireGuard client.

+

The following table summarises the transformations as the client and server exchange information:

+

WireGuard NAT table

+

Even if you use port 51820 everywhere (the default), all this Network Address Translation still occurs. Keep this in mind if you are trying to debug WireGuard because you may actually find it simpler to understand what is going on if you use different numbers for the «public» and «external» ports.

+

This model is a slight simplification because the remote client may also be also operating behind a router performing Network Address Translation. It is just easier to understand the basic concepts if you assume the remote client has a publicly-routable IP address.

+

Debugging techniques

+

Monitor WireGuard traffic between your router and your Raspberry Pi

+

If tcpdump is not installed on your Raspberry Pi, you can install it by:

+
$ sudo apt install tcpdump
+
+

After that, you can capture traffic between your router and your Raspberry Pi by:

+
$ sudo tcpdump -i eth0 -n udp port «external»
+
+

Press ctrlc to terminate the capture.

+

Monitor WireGuard traffic between your Raspberry Pi and the WireGuard container

+

First, you need to add tcpdump to the container. You only need to do this once per debugging session. The package will remain in place until the next time you re-create the container.

+
$ docker exec wireguard bash -c 'apt update ; apt install -y tcpdump'
+
+

To monitor traffic:

+
$ docker exec -t wireguard tcpdump -i eth0 -n udp port «internal»
+
+

Press ctrlc to terminate the capture.

+

Is Docker listening on the Raspberry Pi's «external» port?

+
$ PORT=«external»; sudo nmap -sU -p $PORT 127.0.0.1 | grep "$PORT/udp"
+
+

There will be a short delay. The expected answer is either:

+
    +
  • «external»/udp open|filtered unknown = Docker is listening
  • +
  • «external»/udp closed unknown = Docker is not listening
  • +
+

Success implies that the container is also listening.

+

Is your router listening on the «public» port?

+
$ PORT=«public»; sudo nmap -sU -p $PORT downunda.duckdns.org | grep "$PORT/udp"
+
+

There will be a short delay. The expected answer is either:

+
    +
  • «public»/udp open|filtered unknown = router is listening
  • +
  • «public»/udp closed unknown = router is not listening
  • +
+

Note:

+
    +
  • Some routers always return the same answer irrespective of whether the router is or isn't listening to the port being checked. This stops malicious users from working out which ports might be open. This test will not be useful if your router behaves like that. You will have to rely on tcpdump telling you whether your router is forwarding traffic to your Raspberry Pi.
  • +
+

The read-only flag

+

The :ro at the end of the following line in WireGuard's service definition means "read only":

+
- /lib/modules:/lib/modules:ro
+
+

If that flag is omitted then WireGuard may try to update the /lib/modules path in your operating system. To be clear, /lib/modules is both outside the WireGuard container and outside the normal persistent storage area in the ./volumes directory.

+

The basic idea of containers is that processes are contained, include all their own dependencies, can be added and removed cleanly, and don't change the underlying operating system.

+

Writing into /lib/modules is not needed on a Raspberry Pi, providing that Raspberry Pi OS is up-to-date. That is why the first step in the installation procedure tells you to bring the system up-to-date.

+
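If you want to check whether your up-to-date kernel already provides WireGuard support, the following commands are a reasonable sanity check. They are only indicative: on kernels where WireGuard is built in rather than supplied as a loadable module, both can come back empty even though support is present:

$ modinfo wireguard | head -3
$ lsmod | grep wireguard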

If WireGuard refuses to install and you have good reason to suspect that WireGuard may be trying to write to /lib/modules then you can consider removing the :ro flag and re-trying. Just be aware that WireGuard will likely be modifying your operating system.

+

Updating WireGuard

+

To update the WireGuard container:

+
$ cd ~/IOTstack
+$ docker-compose pull wireguard
+
+

If a new image comes down, then:

+
$ docker-compose up -d wireguard
+$ docker system prune
+
+

2022-10-01 WireGuard migration

+

WireGuard's designers have redefined the structure they expect in the persistent storage area. Before the change, a single volume-mapping got the job done:

+
volumes:
+- ./volumes/wireguard:/config
+
+

After the change, three mappings are required:

+
volumes:
+- ./volumes/wireguard/config:/config
+- ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d
+- ./volumes/wireguard/custom-services.d:/custom-services.d
+
+

In essence, inside the container:

+
    +
  • old: custom-cont-init.d and custom-services.d directories were subdirectories of /config;
  • +
  • new: custom-cont-init.d and custom-services.d are top-level directories alongside /config.
  • +
+

The new custom-cont-init.d and custom-services.d directories also need to be owned by root. Previously, they could be owned by "pi".

+
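If you ever need to check that your structure meets this requirement, a quick look at the directory ownership will tell you (the paths assume the default IOTstack location):

$ ls -ld ~/IOTstack/volumes/wireguard/custom-cont-init.d ~/IOTstack/volumes/wireguard/custom-services.d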

IOTstack users implementing WireGuard for the first time will get the correct structure. Existing users need to migrate. The process is a little messy so IOTstack provides a script to automate the restructure:

+
$ cd ~/IOTstack
+$ docker-compose down wireguard
+$ ./scripts/2022-10-01-wireguard-restructure.sh
+
+
+

see also if downing a container doesn't work

+
+

In words:

+
    +
  • Be in the correct directory
  • +
  • Stop WireGuard (the script won't run if you don't do this)
  • +
  • Run the script
  • +
+

The script:

+
    +
  1. Renames ./volumes/wireguard to ./volumes/wireguard.bak; then
  2. +
  3. Builds the new ./volumes/wireguard structure using ./volumes/wireguard.bak for its source material.
  4. +
  5. Finishes by reminding you to update your docker-compose.yml to adopt the new service definition.
  6. +
+

Your WireGuard client configurations (QR codes) are not affected by the migration.

+

Once the migration is complete and you have adopted the new service definition, you can start WireGuard again:

+
$ docker-compose up -d wireguard
+
+

You should test that your remote clients can still connect. Assuming a successful migration, you can safely delete the backup directory:

+
$ sudo rm -rf ./volumes/wireguard.bak
+
+
+

Always be careful when using sudo in conjunction with recursive remove. Double-check everything before pressing return.

+
+

Getting a clean slate

+

If WireGuard misbehaves, you can start over from a clean slate. You may also need to do this if you change any of the following environment variables:

+
- SERVERURL=
+- SERVERPORT=
+- PEERS=
+- PEERDNS=
+
+

The procedure is:

+
    +
  1. +

    If WireGuard is running, terminate it:

    +
    $ cd ~/IOTstack
    +$ docker-compose down wireguard
    +
    +
    +

    see also if downing a container doesn't work

    +
    +
  2. +
  3. +

    Erase the persistent storage area (essential):

    +
    $ sudo rm -rf ./volumes/wireguard
    +
    +
    +

    Be very careful with that command and double-check your work before you hit return.

    +
    +

    Erasing the persistent storage area:

    +
      +
    • destroys the old client configurations and invalidates any copies of QR codes. Existing clients will stop working until presented with a new QR code.
    • +
    • deactivates PEERDNS=auto with custom-cont-init.
    • +
    +
  4. +
  5. +

    Start WireGuard:

    +
    $ docker-compose up -d wireguard
    +
    +

    This will generate new client configurations and QR codes for your devices.

    +

    Remember to re-activate PEERDNS=auto with custom-cont-init if you need it.

    +
  6. +
diff --git a/Containers/X2go/index.html b/Containers/X2go/index.html
new file mode 100644
index 000000000..4334c6dd6
--- /dev/null
+++ b/Containers/X2go/index.html

x2go

+

x2go is an "alternative" to using VNC for a remote connection. It uses X11 forwarding over ssh to provide a desktop environment.

+

Reason for using: I have a Pi 4 and I didn't buy a micro HDMI cable. You can use VNC, however you are limited to an 800x600 window.

+

Installation

+

Install with sudo apt install x2goserver

+

x2go can't connect to the native Raspbian desktop, so you will need to install another desktop environment with sudo tasksel.

+

image

+

I chose Xfce because it is lightweight.

+

Install the x2go client from their website

+

Now I have a full-screen client.

+

image

+

YouTube tutorial

+

Laurence systems

diff --git a/Containers/ZeroTier-vs-WireGuard/index.html b/Containers/ZeroTier-vs-WireGuard/index.html
new file mode 100644
index 000000000..20e846f02
--- /dev/null
+++ b/Containers/ZeroTier-vs-WireGuard/index.html
+ + + + + + + +

ZeroTier vs WireGuard

+

ZeroTier and WireGuard are not mutually exclusive. You can run both if you wish. The purpose of this document is to try to offer some general guidance about the two solutions.

+

Installation differences

+

Assume your goal is to give yourself access to your home network when you are on the road. This is something you can do with both WireGuard and ZeroTier.

+

WireGuard

+

Providing you follow IOTstack's WireGuard documentation faithfully, WireGuard is a bit easier to get going than ZeroTier.

+

Although it helps to have some feeling for TCP/IP fundamentals, you definitely don't need to be a comms guru.

+

Using WireGuard to access your home network when you are on the road involves:

+
    +
  1. +

    A routable IP address on the WAN side of your home router.

    +
    +

    The IP address on the WAN side of your home router is allocated by your ISP. It can be fixed or dynamic. If you have not explicitly signed up for a fixed IP address service then your address is probably dynamic and can change each time you reboot your router, or if your ISP "bounces" your connection.

    +
    +
  2. +
  3. +

    If your WAN IP address is dynamic then you need a mechanism for making it discoverable using a Dynamic Domain Name System (DDNS) service such as DuckDNS or NoIP.com.

    +
    +

    That's a separate registration and setup process.

    +
    +
  4. +
  5. +

    A WireGuard server running in a Docker container on your Raspberry Pi. Ideally, you give some thought to the clients you will need so that the QR codes can be generated the first time you bring up the container.

    +
  6. +
  7. +

    A WireGuard client running in each remote device. Each client needs to be configured with a QR code or configuration file created in the previous step.

    +
  8. +
  9. +

    A port-forwarding rule in your home router so that traffic originated by remote WireGuard clients can be relayed to the WireGuard server running on your Raspberry Pi.

    +
  10. +
+

ZeroTier

+

Implementing ZeroTier is not actually any more difficult to get going than WireGuard. ZeroTier's apparent complexity arises from the way it inherently supports many network topologies. Getting it set up to meet your requirements takes planning.

+

You still don't need to be a comms guru but it will help if you've had some experience making TCP/IP do what you want.

+

Using ZeroTier to access your home network when you are on the road involves:

+
    +
  1. +

    Registering for a ZeroTier account (free and paid levels).

    +
  2. +
  3. +

    Either (or both) of the following:

    +
      +
    • A ZeroTier client running on every device at your home to which you need remote access;
    • +
    • A ZeroTier-router client running in a Docker container on a Raspberry Pi at your home. This is analogous to the WireGuard server.
    • +
    +
  4. +
  5. +

    A ZeroTier client running in each remote device.

    +
  6. +
  7. +

    Every ZeroTier client (home and remote) needs to be provided with your ZeroTier network identifier. You also need to authorise each client to join your ZeroTier network. Together, these are the equivalent of WireGuard's QR code.

    +
  8. +
  9. +

    Depending on what you want to achieve, you may need to configure one or more static routes in the ZeroTier Cloud and in your home router.

    +
  10. +
+

The things you don't need to worry about include:

+
    +
  • Whether the IP address on the WAN side of your home router is routable;
  • +
  • Any port-forwarding rules in your home router; or
  • +
  • Setting up a Dynamic Domain Name System (DDNS) service.
  • +
+

CGNAT – WireGuard's nemesis

+

Now that you have some appreciation for the comparative level of difficulty in setting up each service, let's focus on WireGuard's key problem.

+

WireGuard depends on the IP address on the WAN side of your home router being routable. What that means is that the IP address has to be known to the routing tables of the core routers that drive the Internet.

+

You will probably have seen quite a few of the addresses in the following table:

+ + + + + + + + + + + +
Table 1: Reserved IP Address Ranges
selected IPv4 Address Ranges
+

Nothing in that list is routable. That list is also far from complete (see wikipedia). The average IOTstack user has probably encountered at least:

+
    +
  • 172.16/12 - commonly used by Docker to allocate its internal networks.
  • +
  • 192.168/16 - used by a lot of consumer equipment such as home routers.
  • +
+ + + + + + + + + + + +
Figure 1: Router WAN port using CGNAT range
Image titleImage title
+

Consider Figure 1. On the left is a cloud representing your home network where you probably use a subnet in the 192.168/16 range. The 192.168/16 range is not routable so, to exchange packets with the Internet, your home router needs to perform Network Address Translation (NAT).

+

Assume a computer on your home network has the IP address 192.168.1.100 and wants to communicate with a service on the Internet. What the NAT service running in your home router does is:

+
    +
  • in the outbound direction, packets leaving your LAN will have a source IP address of 192.168.1.100. NAT replaces the source IP address with the IP address of the WAN side of your home router. Let's say that's 200.1.2.3.
  • +
  • the system at the other end thinks the packets are coming from 200.1.2.3 so that's what it uses when it sends back reply packets.
  • +
  • in the inbound direction, packets arrive with a destination IP address of 200.1.2.3. NAT replaces the destination address 200.1.2.3 with 192.168.1.100 and sends the packet to the device on your home network that originated the traffic.
  • +
+

The NAT service running in your router builds tables that keep track of everything needed to make this work but, and this is a critical point, NAT can only build those tables when devices on your home LAN originate the traffic. If a packet addressed to your WAN IP arrives unexpectedly and NAT can't figure out what to do from its tables, the packet gets dropped.

+

A remote WireGuard client trying to originate a connection with the WireGuard server running in your IOTstack is an example of an "unexpected packet". The reason it doesn't get dropped is because of the port-forwarding rule you set up in your router. That rule essentially fools NAT into believing that the WireGuard server originated the traffic.

+

If the IP address your ISP assigns to your router's WAN interface is routable then your traffic will follow the green line in Figure 1. It will transit your ISP's network, be forwarded to the Internet, and reply packets will come back the same way.

+

However, if the WAN IP address is not routable then your traffic will follow the red line in Figure 1. What happens next is another round of Network Address translation. Using the same address examples above:

+
    +
  • Your router "A" replaces 192.168.1.100 with the IP address of the WAN side of your home router but, this time, that's a non-routable address like 100.64.44.55; and then
  • +
  • Your ISP's router "B" replaces 100.64.44.55 with 200.1.2.3.
  • +
+

The system at the other end sees 200.1.2.3 as the source address so that's what it uses in reply packets.

+

Both NAT engines "A" and "B" are building tables to make this work but, again, it is all in response to outbound traffic. If your remote WireGuard client tries to originate a connection with your WireGuard server by addressing the packet to "B", it's unexpected and gets dropped.

+

Unlike the situation with your home router where you can add a port-forwarding rule to fool NAT into believing your WireGuard server originated the traffic, you don't control your ISP's NAT router so it's a problem you can't fix.

+

Your remote WireGuard client can't bypass your ISP's NAT router by addressing the packet to "A" because that address is not routable, so nothing on the Internet has any idea of where to send it, so the packet gets dropped.

+

Due to the shortage of IPv4 addresses, it is increasingly common for ISPs to apply their own NAT service after yours. Generally, ISPs use the 100.64/10 range so, if you connect to your home router's user interface and see something like the IP address circled in Figure 2, you can be sure that you are the victim of "CGNAT".

+ + + + + + + + + + + +
Figure 2: Router WAN port using CGNAT range
Router CGNAT WAN IP address
+

While seeing a router WAN address that is not routable proves that your ISP is performing an additional Network Address Translation step, seeing an IP address that should be routable does not necessarily prove the opposite. The only way to be certain is to compare the IP address your router shows for its WAN interface with the IP address you see in a service like whatsmyip.com. If they are not the same, your ISP is likely applying its own NAT service.

+
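One way of making that comparison from the Raspberry Pi itself is to ask a third-party "what is my IP" service and compare the answer with the WAN address shown in your router's user interface. Any such service will do; api.ipify.org is simply one example:

$ curl -s https://api.ipify.org ; echo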

If WireGuard won't work and you suspect your ISP is applying its own NAT service, you have the following options:

+
    +
  1. Negotiate with your ISP to be allocated a fixed IP address in a routable range. You may be asked to pay ongoing fees for this.
  2. Change your ISP for one that still allocates routable IP addresses. But this may merely postpone the inevitable. To conserve dwindling IPv4 addresses, many ISPs are implementing Carrier Grade Network Address Translation (CGNAT).
  3. If your ISP offers it, implement IPv6 on your home network. This is a non-trivial task and well beyond the scope of IOTstack's documentation.
  4. Use a Virtual Private Server (VPS) to work around the problem. Explaining this is also well beyond the scope of IOTstack. Google "wireguard cgnat", grab a cup of coffee, and settle down for an afternoon's reading.
  5. Switch to ZeroTier. You can think of it as being "like WireGuard with its own VPS".
+

Site-to-site tunnelling

+

You can use both WireGuard and ZeroTier to set up secure site-to-site routing such as between your home and the homes of your friends and relatives.

+

If you want to use WireGuard:

+
    +
  1. Make sure that all sites running WireGuard obey the CGNAT constraints mentioned above.
  2. Conduct your own research into how to set it up because the IOTstack documentation for WireGuard does not cover the topic.
+

If you want to use ZeroTier:

+
    +
  1. ZeroTier is immune to CGNAT constraints.
  2. The IOTstack documentation for ZeroTier explains the how-to.
diff --git a/Containers/ZeroTier/index.html b/Containers/ZeroTier/index.html
new file mode 100644
index 000000000..e2a05c6d4
--- /dev/null
+++ b/Containers/ZeroTier/index.html

ZeroTier

+

ZeroTier is a Virtual Private Network (VPN) solution that creates secure data-communications paths between devices at different locations. You can use ZeroTier to:

+
    +
  • give remote devices secure access to your home network's local services;
  • +
  • provide secure network-to-network communications between your home network and the home networks of your friends and relations; and
  • +
  • bypass carrier-grade network address translation (CGNAT) which can befuddle WireGuard.
  • +
+

ZeroTier Docker images

+

This documentation covers two DockerHub images and two IOTstack templates:

+
    +
  • +

    zyclonite:zerotier

    +

    This image implements a standard ZeroTier client. It is what you get if you choose "ZeroTier-client" from the IOTstack menu. Its function is identical to the clients you install on Android, iOS, macOS and Windows.

    +
  • +
  • +

    zyclonite:zerotier-router

    +

    This is an enhanced version of the ZeroTier client. It is what you get if you choose "ZeroTier-router" from the IOTstack menu. In addition to connecting your Raspberry Pi to your ZeroTier network, it can also forward packets between remote clients and devices attached to your home LAN. It is reasonably close to WireGuard in its general behaviour.

    +
  • +
+

References

+ +

Definition

+
    +
  • Catenet (a concatenation of networks) means the collection of networks and clients that can reach each other either across a local network or via a path through a ZeroTier Cloud.
  • +
+

Getting started with ZeroTier

+

Create an account

+

ZeroTier offers both free and paid accounts. A free account offers enough for the average home user.

+

Go to the Zerotier downloads page. If you wait a little while, a popup window will appear with a "Start here" link which triggers a wizard to guide you through the registration and setup process. At the end, you will have an account plus an initial ZeroTier Network ID.

+
+

Tip: Make a note of your ZeroTier network ID - you will need it!

+
+

You should take the time to work through the configuration page for your newly-created ZeroTier network. At the very least:

+
    +
  1. Give your ZeroTier network a name. At this point you only have a single network but you may decide to create more. Meaningful names are always easier on the brain than 16-hex-digit numbers.
  2. +
  3. +

    Scroll down until you see the "IPv4 Auto-Assign" area. By default, ZeroTier will have done the following:

    +
      +
    • Enabled "Auto-Assign from Range";
    • +
    • Selected the "Easy" button; and
    • +
    • Randomly-selected one of the RFC1918 private ranges below the line.
    • +
    +

    If the range selected by ZeroTier does not begin with "10.x", consider changing the selection to something in that range. This documentation uses 10.244.*.* throughout and it may be easier to follow if you do something similar.

    +
    +

    Tip: avoid 10.13.*.* if you are also running WireGuard.

    +
    +

    The logic behind this recommendation is that you can use 10.x.x.x for ZeroTier and 192.168.x.x for your home networks, leaving 172.x.x.x for Docker. That should make it easier to understand what is going on when you examine routing tables.

    +

    Nevertheless, nothing about ZeroTier depends on you using a 10.x network. If you have good reasons for selecting from a different range, do so. It's your network!

    +
  4. +
+

Install client on "remote"

+

You should install ZeroTier client software on at least one mobile device (laptop, iDevice) that is going to connect remotely. You don't need to go to a remote location or fake "remoteness" by connecting through a cellular system. You can do all this while the device is connected to your home network.

+

Connecting a client to your ZeroTier network is a three-step process:

+
    +
  1. +

    Install the client software on the device. The Zerotier downloads page has clients for every occasion: Android, iOS, macOS, Unix and Windows.

    +
  2. +
  3. +

    Launch the client and enter your ZeroTier Network ID:

    +
      +
    • +

      on macOS, launching the app adds a menu to the right hand side of your menu bar. From that menu, choose "Join New Network…", enter your network ID into the dialog box and click "Join".

      +
    • +
    • +

      on iOS, launching the app for the first time presents you with a privacy policy which you need to accept, followed by a mostly-blank screen:

      +
        +
      • Tap +, accept the privacy policy (again) and enter your network ID into the field.
      • +
      • Leave the other settings alone and tap "Add Network". Acknowledge any security prompt (what you see depends on your version of iOS).
      • +
      • Turn on the slider button.
      • +
      +
    • +
    • +

      Android and Windows – follow your nose.

      +
    • +
    +
  4. +
  5. +

    In a web browser:

    +
      +
    • connect to ZeroTier Central: https://my.zerotier.com
    • +
    • login to your account
    • +
    • click on your network ID
    • +
    • scroll down to the "Members" area
    • +
    • find the newly-added client
    • +
    • authorise the client by turning on its "Auth?" checkbox
    • +
    • fill in the "Name" and, optionally, the "Description" fields so that you can keep track of the device associated with the client ID. Again, names are easier on the brain than numbers.
    • +
    +
  6. +
+

Each time you authorise a client, ZeroTier assigns an IP address from the range you selected in the "IPv4 Auto-Assign" area. Most of the time this is exactly what you want but, occasionally, you may want to override ZeroTier's choice. The simplest approach is:

+
    +
  • +

    Type a new IP address into the text field to the right of the + ;

    +
    +

    your choice needs to be from the range you selected in the "IPv4 Auto-Assign" area

    +
    +
  • +
  • +

    Click the + to accept the address; then

    +
  • +
  • Delete the unwanted address by clicking the trash-can icon to its left.
  • +
+

ZeroTier IP addresses are like fixed assignments from a DHCP server. They persist. The same client will always get the same IP address each time it connects.

+

Key point:

+
    +
  • Clients can't join your ZeroTier network without your approval. If a new client appears in the list which you don't recognise, click the trash-can icon at the far right of its row. That denies the client access - permanently. The client needs to be reset before it can make another attempt.
  • +
+

Other devices

+

Do not install ZeroTier on your Raspberry Pi by following the Linux instructions on the Zerotier downloads page. Those instructions lead to a "native" installation. We are about to do all that with a Docker container.

+

You can install ZeroTier clients on other systems but you should hold off on doing that for now because, ultimately, it may not be needed. Whether you need ZeroTier client software on any device will depend on the decisions you make as you follow these instructions.

+

Topology 1: ZeroTier client-only

+

To help you choose between the ZeroTier-client and ZeroTier-router containers, it is useful to study a network topology that does not include routing.

+ + + + + + + + + + + +
Topology 1: Remote client accesses client on home network
ZeroTier - topology 1
+

Four devices are shown:

+
    +
  • A is a Raspberry Pi running "ZeroTier-client" installed by IOTstack.
  • +
  • +

    B is some other device (another Pi, Linux box, Mac, PC).

    +
    +

    The key thing to note is that B is not running ZeroTier client software.

    +
    +
  • +
  • +

    C is your local router, likely an off-the-shelf device running a custom OS.

    +
    +

    Again, assume C is not running ZeroTier client software.

    +
    +
  • +
  • +

    G is the remote client you set up above.

    +
  • +
+

Table 1 summarises what you can and can't do from the remote client G:

+ + + + + + + + + + + +
Table 1: Reachability using only ZeroTier clients
ZeroTier - topology 1 reachability
+

G can't reach B or C, directly, because those devices are not running ZeroTier client software.

+

G can reach B and C, indirectly, by first connecting to A. An example would be G opening an SSH session on A then, within that session, opening another SSH session on B or C.

+
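As a sketch of that springboard idea (the addresses are purely hypothetical: assume A's ZeroTier address is 10.244.0.10 and B's home-LAN address is 192.168.1.50), modern OpenSSH can do the two hops in a single command using a jump host:

$ ssh -J pi@10.244.0.10 pi@192.168.1.50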

It should be apparent that you can also solve this problem by installing ZeroTier client software on B. It would then have its own interface in the 10.244.0.0/16 network that forms the ZeroTier Cloud and be reachable directly from G. The no entries would then become yes, with the caveat that G would reach B via its interface in the 10.244.0.0/16 network.

+

The same would be true for your router C, providing it was capable of running ZeroTier client software.

+

Lessons to learn:

+
    +
  1. All hosts running a ZeroTier client and sharing a common ZeroTier Network ID can reach each other.
  2. +
  3. You can springboard from a host that is reachable to a host that is otherwise unreachable, but your ability to do that in any given situation may depend on the protocol you are trying to use.
  4. +
+

ZeroTier clients are incredibly easy to set up. It's always:

+
    +
  1. Install the client software.
  2. +
  3. Tell the client the network ID.
  4. +
  5. Authorise the device.
  6. +
+

After that, it's full peer-to-peer interworking.

+

The problem with this approach is that it does not scale if you are only signed up for a free ZeroTier account. Free accounts are limited to 25 clients. After that you need a paid account.

+

Installing ZeroTier-client

+

Now that you understand what the ZeroTier-client will and won't do, if you want to install the ZeroTier client on your Raspberry Pi via IOTstack, proceed like this:

+
    +
  1. Run the IOTstack menu and choose "Zerotier-client".
  2. +
  3. +

    Bring up the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d zerotier-client
    +
    +
  4. +
  5. +

    Tell the container to join your ZeroTier network by replacing «NetworkID» with your ZeroTier Network ID:

    +
    $ docker exec zerotier zerotier-cli join «NetworkID» 
    +
    +

    You only need to do this once. The information is kept in the container's persistent storage area. Thereafter, the client will rejoin the same network each time the container comes up.

    +
  6. +
  7. +

    Go to ZeroTier Central and authorise the device.

    +
  8. +
+

Job done! There are no environment variables to set. It just works.
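If you would like to confirm that the client has joined your network and been authorised, you can list the networks the container knows about (see Useful Commands later in this page for more detail):

$ docker exec zerotier zerotier-cli listnetworks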

+

Topology 2: ZeroTier router

+

This topology is a good starting point for using ZeroTier to replicate a WireGuard service running on your Raspberry Pi. Remember, you don't have to make an either/or choice between ZeroTier and WireGuard. You can run both containers side-by-side.

Topology 2: Remote client accesses home network
ZeroTier - topology 2
+

With this structure in place, all hosts in Topology 2 can reach each other directly. All the cells in Table 1 are yes. Full peer-to-peer networking!

+

Installing ZeroTier-router

+

The ZeroTier-router container is just the ZeroTier-client container with some iptables rules. However, you can't run both containers at the same time. If ZeroTier-client is installed:

+
    +
  1. +

    Terminate the container if it is running:

    +
    $ cd ~/IOTstack
    +$ docker-compose down zerotier-client
    +
    +
    +

    See also if downing a container doesn't work

    +
    +
  2. +
  3. +

    Remove the existing service definition, either by:

    +
      +
    • running the menu and de-selecting "ZeroTier-client"; or
    • +
    • editing your docker-compose.yml to remove the service definition.
    • +
    +
  4. +
+

The ZeroTier-router can re-use the ZeroTier-client configuration (and vice-versa) so you should not erase the persistent storage area at:

+
~/IOTstack/volumes/zerotier-one/
+
+

Keeping the configuration also means you won't need to authorise the ZeroTier-router client when it first launches.

+

To install Zerotier-router:

+
    +
  1. +

    Run the IOTstack menu and choose "Zerotier-router".

    +
  2. +
  3. +

    Use a text editor to open your docker-compose.yml. Find the ZeroTier service definition and the environment variables it contains:

    +
 5    environment:
 6      - TZ=${TZ:-Etc/UTC}
 7      - PUID=1000
 8      - PGID=1000
 9    # - ZEROTIER_ONE_NETWORK_IDS=yourNetworkID
10      - ZEROTIER_ONE_LOCAL_PHYS=eth0 wlan0
11      - ZEROTIER_ONE_USE_IPTABLES_NFT=true
12      - ZEROTIER_ONE_GATEWAY_MODE=both
    +
    +

    You should:

    +
      +
    1. Set your timezone.
    2. +
    3. +

      Uncomment line 9 and replace "yourNetworkID" with your ZeroTier Network ID. This variable only has an effect the first time ZeroTier is launched. It is an alternative to executing the following command after the container has come up the first time:

      +
      $ docker exec zerotier zerotier-cli join «NetworkID»
      +
      +

      The reason for the plural variable name ("IDS") is because it supports joining multiple networks on first launch. Network IDs are space-separated, like this:

      +
 9      - ZEROTIER_ONE_NETWORK_IDS=3926d64e8ff148b3 ef7a364a687c45e0
      +
      +
    4. +
    5. +

      If necessary, change line 10 to represent your active local interfaces. Examples:

      +
        +
      • +

        if your Raspberry Pi only connects to WiFi, you would use:

        +
10      - ZEROTIER_ONE_LOCAL_PHYS=wlan0
        +
        +
      • +
      • +

        if both Ethernet and WiFi are active, use:

        +
10      - ZEROTIER_ONE_LOCAL_PHYS=eth0 wlan0
        +
        +
      • +
      +
    6. +
    +
  4. +
  5. +

    Launch the container:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d zerotier-router
    +
    +
  6. +
  7. +

    If the Raspberry Pi running the service has not previously been authorised in ZeroTier Central, authorise it. Make a note of the IP address assigned to the device in ZeroTier Central. In Topology 2 it is 10.244.0.1.

    +
  8. +
  9. +

    You also need to set up some static routes:

    +
      +
    • +

      In ZeroTier Central …

      +

      Please start by reading Managed Routes.

      +

      Once you understand how to construct a valid less-specific route, go to ZeroTier Central and find the "Managed Routes" area. Under "Add Routes" are text-entry fields. Enter the values into the fields:

      +
      Destination: 192.168.202.0/23 (via) 10.244.0.1
      +
      +

      Click Submit.

      +

      With reference to Topology 2:

      +
        +
      • 192.168.202.0/23 is the less-specific route to the home network; and
      • +
      • 10.244.0.1 is the IP address of A in the ZeroTier Cloud.
      • +
      +

      This route teaches ZeroTier clients that the 10.244.0.0/16 network offers a path to the less-specific range (192.168.202.0/23) encompassing the home subnet (192.168.203.0/24).

      +

      Remote clients can then reach devices on your home network. When a packet arrives on A, it is passed through NAT so devices on your home network "think" the packet has come from A. That means they can reply. However, this only works for connections that are initiated by remote clients like G. Devices on your home network like B and C can't initiate connections with remote clients because they don't know where to send the traffic. That's the purpose of the next static route.

      +
    • +
    • +

      In your home router C

      +

      Add a static route to the ZeroTier Cloud pointing to the IP address of your Raspberry Pi on your home network. In Topology 2, this is:

      +
      10.244.0.0/16 via 192.168.203.50
      +
      +
      +

You need to figure out how to add this route in your router's user interface (a command-line sketch for the special case of a Linux-based router follows this list).

      +
      +

      Here's an example of what actually happens once this route is in place. Suppose B wants to communicate with G. B is not a ZeroTier client so it doesn't know that A offers a path to G. The IP stack running on B sends the packet to the default gateway C (your router). Because of the static route, C sends the packet to A. Once the packet arrives on A, it is forwarded via the ZeroTier Cloud to G.

      +

The process of a packet going into a router and coming back out on the same interface is sometimes referred to as "one-armed routing". It may seem inefficient but C also sends B what is called an "ICMP Redirect" message. This teaches B that it can reach G via A so, in practice, not every B-to-G packet needs to transit C.

      +
    • +
    +
  10. +
+
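If your home router happens to be a Linux box rather than an off-the-shelf unit, the static route from the last step could be added with a command along these lines (a sketch only, using the Topology 2 addresses; consumer routers normally expose this via their web GUI instead):

$ sudo ip route add 10.244.0.0/16 via 192.168.203.50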

Topology 3: Full tunnel

+

The ZeroTier Cloud does not offer a path to the Internet. It is not a VPN solution which will allow you to pretend to be in another location. Every ZeroTier client still needs its own viable path to the Internet.

Topology 3: Remote client tunnels to Internet via Home Network
ZeroTier - topology 3
+

In terms of traffic flows, what this means in a practical sense is:

+
    +
  • Traffic from G to [A, B or C] (and vice versa) flows over the ZeroTier Cloud and is securely end-to-end encrypted in transit; but
  • +
  • All other traffic goes straight to the ISP or cellular carrier and is not encrypted.
  • +
+

This is the routing table you would expect to see on G:

+
Destination     Gateway         Genmask         Flags   MSS Window  irtt Iface
+0.0.0.0         172.20.10.1     0.0.0.0         UG        0 0          0 wlan0
+10.244.0.0      0.0.0.0         255.255.0.0     U         0 0          0 ztr2qsmswx
+172.20.10.0     0.0.0.0         255.255.255.240 U         0 0          0 wlan0
+192.168.202.0   10.244.0.1      255.255.254.0   UG        0 0          0 ztr2qsmswx
+
+

Executing a traceroute to 8.8.8.8 (Google DNS) shows:

+
$ traceroute 8.8.8.8
+traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets
+ 1  172.20.10.1 (172.20.10.1)  4.706 ms  4.572 ms  4.398 ms
+ 2  10.111.9.189 (10.111.9.189)  49.599 ms  49.807 ms  49.626 ms
+…
+11  dns.google (8.8.8.8)  32.710 ms  32.047 ms
+
+

You can see that the first hop is via 172.20.10.1. This means the traffic is not flowing over the ZeroTier Cloud (10.244.0.0/16). The traffic is reaching 8.8.8.8 via the default route through the phone's connection to the carrier's network (172.20.10.0/28).

+

ZeroTier supports an option for forcing all of a client's traffic to pass over the ZeroTier Cloud. The client's traffic is then end-to-end encrypted, at least until it reaches your home. Traffic destined for the Internet will then pass back out through your home router. From the perspective of the Internet, your remote client will appear to be at your home.

+

Enabling this feature is a two-step process:

+
    +
  1. +

    In ZeroTier Central, find the "Managed Routes" area and add:

    +
Destination: 0.0.0.0/0 (via) 10.244.0.1
    +
    +

This is setting up a "default route". 10.244.0.1 is the IP address of A in the ZeroTier network.

    +
  2. +
  3. +

    Each remote client (and only remote clients) needs to be instructed to accept the default route from the ZeroTier Cloud:

    +
      +
    • +

      iOS clients:

      +
        +
      1. Launch the ZeroTier One app.
      2. +
      3. If the connection is not already enabled, turn it on and wait for it to start.
      4. +
      5. Tap on the network ID (brings up a details sheet).
      6. +
      7. Turn on "Enable Default Route".
      8. +
      9. Tap outside the details sheet to dismiss it.
      10. +
      11. Turn the connection off.
      12. +
      13. Turn the connection on again.
      14. +
      +
    • +
    • +

      Linux clients: execute the command:

      +
      $ docker exec zerotier zerotier-cli set «yourNetworkID» allowDefault=1
      +
      +

      See change option for an explanation of the output and how to turn the option off.

      +
    • +
    • +

      macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow Default Router [sic] Override".

      +
    • +
    • Android and Windows clients: follow your nose.
    • +
    +
  4. +
+

Once allowDefault is enabled on a client, the routing table changes:

+
1  Destination     Gateway         Genmask         Flags   MSS Window  irtt Iface
2  0.0.0.0         10.244.0.1      128.0.0.0       UG        0 0          0 ztr2qsmswx
3  0.0.0.0         172.20.10.1     0.0.0.0         UG        0 0          0 wlan0
4  10.244.0.0      0.0.0.0         255.255.0.0     U         0 0          0 ztr2qsmswx
5  128.0.0.0       10.244.0.1      128.0.0.0       UG        0 0          0 ztr2qsmswx
6  172.20.10.0     0.0.0.0         255.255.255.240 U         0 0          0 wlan0
7  192.168.202.0   10.244.0.1      255.255.254.0   UG        0 0          0 ztr2qsmswx
+
+

Close inspection will show you that two entries have been added to the routing table:

Line   Route         Destination   Mask        Address Range
2      0.0.0.0/1     10.244.0.1    128.0.0.0   0.0.0.0…127.255.255.255
5      128.0.0.0/1   10.244.0.1    128.0.0.0   128.0.0.0…255.255.255.255

Taken together, these have the same effect as a standard default route (0.0.0.0/0) but, because they are more-specific than the standard default route being offered by the cellular network, the path via ZeroTier Cloud will be preferred.

+

You can test this with a traceroute:

+
$ traceroute 8.8.8.8
+traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets
+ 1  10.244.0.1 (10.244.0.1)  98.239 ms  98.121 ms  98.042 ms
+ 2  192.168.203.1 (192.168.203.1)  98.038 ms  97.943 ms  97.603 ms
+…
+ 7  dns.google (8.8.8.8)  104.748 ms  106.669 ms  106.356 ms
+
+

This time, the first hop is via the ZeroTier Cloud to A (10.244.0.1), then out through the local router C (192.168.203.1).

+

Topology 4: Multi-site routing

Topology 4: Site-to-Site with ZeroTier-router
ZeroTier - topology 4
+

In this topology, everything can reach everything within your catenet. The installation process for F is the same as it was for A. See Installing ZeroTier-router.

+

In ZeroTier Central you need one "less-specific" Managed Route pointing to each site where there is a ZeroTier router.

+

At each site, the local router needs two static routes, both via the IP address of the local host running the ZeroTier-router container:

+
    +
  1. A static route pointing to the ZeroTier Cloud (10.244.0.0/16); plus
  2. +
  3. A static route covering all of 192.168.0.0/16.
  4. +
+

If the second route does not make sense, think of it like this:

+
    +
  • A packet destined for the local network (at any site) will match the more-specific routing table entry for that local network and be sent direct to the destination host.
  • +
  • Otherwise, the packet will be sent to the local router (default gateway).
  • +
  • On the router (C or D), the packet will match the less-specific static route for 192.168.0.0/16 and be forwarded to the local host running the ZeroTier-router container (one-armed routing).
  • +
  • Courtesy of the Managed Routes set in ZeroTier Central, the local host running the ZeroTier-router container (A or F) will either have a more-specific route through the ZeroTier Cloud to the destination network, or it won't.
  • +
  • If it has a more-specific route, the packet will be forwarded across the ZeroTier Cloud.
  • +
  • Otherwise the packet will be dropped and the originator will receive an "ICMP destination network unreachable" message.
  • +
+

In essence, both these static routes are "set and forget". They assume catenet growth is a possibility, and that it is preferable to set up schemes that will be robust and not need constant tweaking.

+

tunnelling remote clients

+

The diagram above for Topology 4 does not include a default route in ZeroTier Central. If you implement Topology 4 according to the diagram:

+
    +
  • traffic between G and your sites will travel via the ZeroTier Cloud (tunnelled, encrypted); but
  • +
  • traffic between G and the wider Internet will not be tunnelled, will not be encrypted by ZeroTier, and will reach the Internet via the ISP or cellular carrier.
  • +
+

If you want remote clients like G to use full tunnelling, you can follow the same approach as for Topology 3. You simply need to decide which site should be used by G to reach the Internet. Having made your decision, define an appropriate default route in ZeroTier Central. For example, if G should reach the Internet via:

+
    +
  • +

    the left-hand site, the default route should point to the ZeroTier-router running on A:

    +
Destination: 0.0.0.0/0 (via) 10.244.0.1
    +
    +
  • +
  • +

    the right-hand site, the default route should point to the ZeroTier-router running on F:

    +
Destination: 0.0.0.0/0 (via) 10.244.0.2
    +
    +
  • +
+

Once you implement the default route, everything else is the same as for Topology 3.

+

Managed Routes

+

TL;DR

+

If your home network is a single subnet with a /24 prefix (a subnet mask of 255.255.255.0), you need to follow two rules when constructing the "destination" field of a Managed Route in ZeroTier Central:

+
    +
  1. use a /23 prefix.
  2. +
  3. if the third octet of your home network range is an odd number, subtract 1 to make it an even number.
  4. +
+

Examples:

Table 2: Constructing Managed Routes for Subnets - examples
ZeroTier - Managed Route examples
+
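If you would rather not do that arithmetic in your head, a subnet calculator can do it for you. For example, assuming the ipcalc package is installed (sudo apt install ipcalc), feeding it your home subnet with a /23 prefix should report the network as 192.168.202.0/23:

$ ipcalc 192.168.203.0/23    # the "Network" line should read 192.168.202.0/23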

If your home network has multiple subnets and/or you do not use /24 prefixes then you should either read through the next section or consult one of the many IP address calculators that are available on the Internet. One example:

+ +

The details

+

This is a slightly contrived example but it will help you to understand why you need Managed Routes and how to construct them correctly in ZeroTier Central.

+

Assume we are talking about Topology 1 and that this is the routing table for host A:

+
Destination     Gateway         Genmask         Flags   MSS Window  irtt Iface
+0.0.0.0         192.168.203.1   0.0.0.0         UG        0 0          0 eth0
+192.168.203.0   0.0.0.0         255.255.255.0   U         0 0          0 eth0
+
+

Suppose A wants to send a packet to B. The IP stack starts searching the routing table. For each row:

+
    +
  1. +

    The destination IP address for B (192.168.203.60) is ANDed with the subnet mask (255.255.255.0). Given the last row in the routing table above:

    +
    candidate = destinationIP AND Genmask
    +          = 192.168.203.60 AND 255.255.255.0
    +          = 192.168.203.0
    +
    +
  2. +
  3. +

    The candidate (192.168.203.0) is compared with the value in the Destination column (192.168.203.0). If the two values are the same, the route is considered to be a match:

    +
    match = compareEqual(candidate,Destination)
    +      = compareEqual(192.168.203.0, 192.168.203.0)
    +      = true
    +
    +
  4. +
  5. +

    The result is a match so the packet is handed to Layer 2 for transmission via the eth0 interface.

    +
  6. +
+

Now suppose A wants to send a packet to 8.8.8.8 (Google DNS). The last row of the routing table will evaluate as follows:

+
candidate = destinationIP AND Genmask
+          = 8.8.8.8 AND 255.255.255.0
+          = 8.8.8.0
+    match = compareEqual(candidate,Destination)
+          = compareEqual(8.8.8.0, 192.168.203.0)
+          = false
+
+

The result is no-match so the routing algorithm continues to search the table. Eventually it will arrive at the 0.0.0.0 entry which is known as the "default route":

+
candidate = destinationIP AND Genmask
+          = 8.8.8.8 AND 0.0.0.0
+          = 0.0.0.0
+    match = compareEqual(candidate,Destination)
+          = compareEqual(0.0.0.0, 0.0.0.0)
+          = true
+
+

The result of comparing anything with the default route is always a match. Because the "Gateway" column is non-zero, the IP address of 192.168.203.1 (C) is used as the "next hop". The IP stack searches the routing table again. This new search for 192.168.203.1 will match on the bottom row so the packet will be handed to Layer 2 for transmission out of the eth0 interface aimed at C (the local router, otherwise known as the "default gateway"). In turn, the local router forwards the packet to the ISP and, eventually, it winds up at 8.8.8.8.

+

Let's bring ZeroTier into the mix.

+

The local subnet shown in Topology 1 is 192.168.203.0/24 so it seems to make sense to use that same subnet in a Managed Route. Assume you configured that in ZeroTier Central:

+
192.168.203.0/24 via 10.244.0.1
+
+

When the ZeroTier client on (A) adds that route to its routing table, you get something like this:

+
Destination     Gateway         Genmask         Flags   MSS Window  irtt Iface
+0.0.0.0         192.168.203.1   0.0.0.0         UG        0 0          0 eth0
+10.244.0.0      0.0.0.0         255.255.0.0     U         0 0          0 ztr2qsmswx
+192.168.203.0   10.244.0.1      255.255.255.0   UG        0 0          0 ztr2qsmswx
+192.168.203.0   0.0.0.0         255.255.255.0   U         0 0          0 eth0
+
+
+

To all network gurus following along: please remember this is a contrived example.

+
+

Study the last two lines. You should be able to see that both lines will match when the IP stack searches this table whenever A needs to send a packet to B. This results in a tie.

+

What normally happens is a tie-breaker algorithm kicks in. Schemes of route metrics, route weights, hop counts, round-trip times or interface priorities are used to pick a winner. Unfortunately, those schemes are all "implementation defined". Although the algorithms usually converge on a good answer, sometimes Murphy's Law kicks in. Routing problems are notoriously difficult to diagnose and can manifest in a variety of ways, ranging from sub-optimal routing, where the only symptom may be sluggishness, to forwarding loops, which can render your network mostly useless.

+

Prevention is always better than cure so it is preferable to side-step the entire problem by taking advantage of the fact that IP routing will always match on a more-specific route before a less-specific route, and employ slightly less-specific Managed Routes in ZeroTier Central.

+

What do "more-" and "less-" mean when we're talking about searching a routing table? The terms refer to the length of the network prefix. In "/X" notation, a larger value of X is more-specific than a smaller value of X:

+
    +
• a "/25" is more specific than a "/24"
  • +
  • a "/23" is less specific than a "/24"
  • +
+

To ensure that the IP stack will always make the correct decision, the Managed Route you configure in ZeroTier Central should always be slightly less-specific than the actual subnet it covers. Given 192.168.203.0/24, your first attempt at constructing a less-specific route might be:

+
192.168.203.0/23 via 10.244.0.1
+
+

Sadly, that won't work. Why? Because the 192.168.203.0/23 subnet does not actually exist. That may surprise you but it's true. It has to do with the requirement that subnet masks use contiguous one-bits. It's easier to understand if you study the binary:

Table 3: Invalid vs Valid Managed Route
ZeroTier - managed route construction
+

The left hand side of Table 3 shows a network prefix of 192.168.203.0/23 along with what that /23 expands to as a subnet mask of 255.255.254.0. The last row is the result of ANDing the first two rows. Notice the right-most 1-bit in the third octet (circled). That bit hasn't made it to the last row and that's a problem.

+

What's going on here is that the right-most 1-bit in the third octet is not actually part of the network portion of the IP address; it's part of the host portion. For a network prefix to be valid, all the bits in the host portion must be zero. To put it another way, the IP address 192.168.203.0/23 is host .1.0 (ordinal 256) in subnet 192.168.202.0/23.

+

Read that last sentence again because "in subnet 192.168.202.0/23" is the clue.

+

The right hand side of Table 3 starts with network prefix 192.168.202.0/23 and ANDs it with its subnet mask. This time the host portion is all-zero. That means it's a valid subnet and, accordingly, can be the subject of a Managed Route.

+

Table 3 tells us something else about a /23 prefix. It tells us that whatever value appears in that third octet, the right-most 1-bit must always be zero. That's another way of saying that a /23 subnet is only valid if the third octet is an even number.
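You can demonstrate the same thing from the command line. Python's standard ipaddress module (assumed to be available as python3, which it is on Raspberry Pi OS) refuses to treat a prefix with host bits set as a network:

$ python3 -c "import ipaddress; print(ipaddress.ip_network('192.168.202.0/23'))"   # prints 192.168.202.0/23
$ python3 -c "import ipaddress; print(ipaddress.ip_network('192.168.203.0/23'))"   # raises ValueError: has host bits set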

+

At this point, you should understand the reason for the two rules in TL;DR above, and have a better idea of what you are doing if you need to use a subnet calculator.

+

Network Design considerations

+

If you intend to set up multiple sites and route between them using ZeroTier, you need to be aware of some of the consequences that flow from how you need to configure Managed Routes.

+

First, it should be obvious that you can't have two sites with the same network prefix. You and a friend can't both be using 192.168.1.0/24 at home.

+

The second is that the set of less-specific prefixes in Managed Routes can't overlap either. If you are using the 192.168.0.0/24 subnet at home while your friend is using 192.168.1.0/24 at her home, both of your less-specific Managed Routes will be the same: 192.168.0.0/23. If you set up two Managed Routes to 192.168.0.0/23 with different "via" addresses, all the routers will think there's a single site that can be reached by multiple routes. That's a recipe for a mess.

+

Putting both of the above together, any network plan for multiple sites should assume a gap of two between subnets. For example, if you are using the subnet 192.168.0.0/24 then your friend should be using 192.168.2.0/24. Your Managed Route will be 192.168.0.0/23, and your friend's Managed Route will be 192.168.2.0/23.

+

None of this stops either you or your friend from using both of the /24 subnets that aggregate naturally under your respective /23 prefixes. For example, the single Managed Route 192.168.0.0/23 naturally aggregates two subnets:

+
    +
  • 192.168.0.0/24 - eg your Ethernet
  • +
  • 192.168.1.0/24 - eg your WiFi
  • +
+

Similarly, if you are using more than two subnets, such as:

+
    +
  • 192.168.0.0/24 - your house Ethernet
  • +
  • 192.168.1.0/24 - your house WiFi
  • +
  • 192.168.2.0/24 - your workshop WiFi
  • +
+

then you would slide your ZeroTier Managed Route prefix another bit to the left and use:

+
192.168.0.0/22 via 10.244.0.1
+
+

Notice what happens as you slide the prefix left. Things change in powers of 2:

+
    +
  • a /24 prefix Managed Route spans exactly 1 /24 subnet
  • +
  • a /23 prefix Managed Route spans exactly 2 /24 subnets
  • +
  • a /22 prefix Managed Route spans exactly 4 /24 subnets
  • +
  • a /21 prefix Managed Route spans exactly 8 /24 subnets
  • +
  • +
  • a /17 prefix Managed Route spans exactly 128 /24 subnets
  • +
+

The direct consequence of that for Managed Routes is:

+
    +
  • a /23 prefix means values in the third octet must be wholly divisible by 2
  • +
  • a /22 prefix means values in the third octet must be wholly divisible by 4
  • +
  • a /21 prefix means values in the third octet must be wholly divisible by 8
  • +
  • +
  • a /17 prefix means values in the third octet must be wholly divisible by 128
  • +
+

Understanding how adjacent subnets can be aggregated easily by changing the prefix length should also bring with it the realisation that it is unwise to use a scattergun approach when allocating the third octet among your home subnets. Consider this scheme:

+
    +
  • 192.168.0.0/24 - your Ethernet
  • +
  • 192.168.100.0/24 - your house WiFi
  • +
  • 192.168.200.0/24 - your workshop WiFi
  • +
+

You would need three /23 Managed Routes in ZeroTier Central. In addition, you would prevent anyone else in your private ZeroTier catenet from using 192.168.1.0/24, 192.168.101.0/24 and 192.168.201.0/24. It would be preferable to use a single /22 as shown in the example above.

+

Sure, that third octet can range from 0..255 but it's still a finite resource which is best used wisely, particularly once you start to contemplate using ZeroTier to span multiple sites.

+

Host mode and ports

+

The default service definition for ZeroTier-router contains the following lines:

+
13    network_mode: host
14    x-ports:
15    - "9993:9993"
+
+

Line 13 tells ZeroTier to run in Docker's "host mode". This means the processes running inside the container bind to the Raspberry Pi's network ports.

+
+

Processes running inside non-host-mode containers bind to the container's ports, and then use Network Address Translation (NAT) to reach the Raspberry Pi's ports.

+
+

The x- prefix on line 14 has the effect of commenting-out the entire clause. In other words, the single x- has exactly the same meaning as:

+
14  # ports:
15  # - "9993:9993"
+
+

The x-ports clause is included to document the fact that ZeroTier uses the Raspberry Pi's port 9993.

+
+

Documenting the ports in use for host-mode containers helps IOTstack's maintainers avoid port conflicts when adding new containers.

+
+

You should not remove the x- prefix. If docker-compose complains about the x-ports clause, the message is actually telling you that your copy of docker-compose is obsolete and that you should upgrade.
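If you want to satisfy yourself that ZeroTier really is bound to the Raspberry Pi's port 9993 while the container is running, one way (assuming the ss utility from the iproute2 package is installed) is:

$ sudo ss -tulpn | grep 9993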

+

The Domain Name System

+

Normal DNS

+

If you have a DNS server running somewhere in your catenet, you can ask ZeroTier to propagate that information to your ZeroTier clients. It works the same way as a DHCP server can be configured to provide the IP addresses of DNS servers when handing out leases.

+

It is a two-step process:

+
    +
  1. +

    In ZeroTier Central, find the "DNS" area, complete the (optional) "Search Domain" and (required) "Server Address" fields, then click Submit.

    +

    Examples. In Topology 4, suppose the DNS server (eg PiHole or BIND9) is host:

    +
      +
    • A, then "Server Address" = 10.244.0.1 (preferred) or 192.168.203.50 (less preferred);
    • +
    • B, then "Server Address" = 192.168.203.60
    • +
    +
  2. +
  3. +

    Each client needs to be instructed to accept the DNS configuration:

    +
      +
    • iOS clients: always enabled.
    • +
    • +

      Linux clients: execute the command:

      +
      $ docker exec zerotier zerotier-cli set «yourNetworkID» allowDNS=1
      +
      +

      See change option for an explanation of the output and how to turn the option off.

      +
    • +
    • +

      macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow DNS Configuration".

      +
    • +
    • Android and Windows clients: follow your nose.
    • +
    +
  4. +
+

Notes:

+
    +
• Notice that clients need to opt-in to receiving DNS via ZeroTier. It is generally more appropriate for remote clients to do this than devices attached to a home network. This is probably why ZeroTier-managed DNS is "always on" for iOS clients. Android clients may be the same. For local clients, it is usually better to let DHCP hand out DNS servers with the lease.
  • +
  • +

There are reports of allowDNS being unreliable on Linux clients. If you have trouble on Linux, try disabling allowDNS and adding the DNS server(s) to the file below (a sketch follows this list):

    +
    /etc/resolvconf.conf
    +
    +
  • +
+
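As a sketch only (the exact syntax depends on which resolvconf implementation your Linux client uses): with openresolv, which is what Raspberry Pi OS ships, you would append a line like the following to /etc/resolvconf.conf and then run sudo resolvconf -u, where 192.168.203.60 is the example DNS server (B) mentioned above:

name_servers=192.168.203.60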

Multicast DNS

+

The ZeroTier Cloud relays multicast traffic. That means that multicast DNS (mDNS) names are propagated between ZeroTier clients and you can use those names in connection requests.

+

In terms of Topology 4, A, F and G can all reach each other using mDNS names. For example:

+
pi@a:~$ ssh pi@f.local
+
+

However, even if B and C were advertising mDNS names over 192.168.203.0/24, they would be unreachable from D, E, F and G using those mDNS names because B and C are not ZeroTier clients. The same applies to reaching D and E from A, B, C or G using mDNS names.

+

Resolving address-range conflicts

+

As your network infrastructure becomes more complex, you may find that you occasionally run into address-range conflicts that force you to consider renumbering.

+

ZeroTier Central is where you define the subnet used by the ZeroTier Cloud (eg 10.244.0.0/16), while your home router is generally where you define the subnets used on your home networks.

+

Docker typically allocates its internal subnets from 172.16/12 but it can sometimes venture into 192.168/16. Docker tries to stay clear of anything that is in use but it doesn't always have full visibility into every corner of your private catenet.

+

The IOTstack menu adds the following to your compose file:

+
networks:
+
+  default:
+    driver: bridge
+    ipam:
+      driver: default
+
+  nextcloud:
+    driver: bridge
+    internal: true
+    ipam:
+      driver: default
+
+

That structure tells docker-compose that it should construct two networks:

+
    +
  • iotstack_default
  • +
  • iotstack_nextcloud
  • +
+

but leaves it up to docker-compose to work out the details. If you need more control, you can tell docker-compose to use specific subnets by adding two lines to each network definition:

+
networks:
+
+  default:
+    driver: bridge
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.30.0.0/22
+
+  nextcloud:
+    driver: bridge
+    internal: true
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.30.4.0/22
+
+

A /22 is sufficient for 1,021 containers. That may seem like overkill but it doesn't really affect anything. Nevertheless, no part of those subnet prefixes is any kind of "magic number". You should feel free to use whatever subnet definitions are appropriate to your needs.
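If you want to see which subnets Docker has actually chosen (or confirm that your explicit definitions have taken effect), you can inspect the networks once the stack is up. For example:

$ docker network inspect iotstack_default | grep -i subnet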

+

Note:

+
    +
  • If you are never going to run NextCloud on your Raspberry Pi, you can omit that network definition entirely. Doing so will silence unnecessary messages from docker-compose.
  • +
+

Global addressing

+

Everything in this documentation assumes you are using RFC1918 private ranges throughout your catenet. ZeroTier Cloud makes the same assumption.

+

If some parts of your private catenet are using public addressing (either officially allocated to you or "misappropriated" like the 28/7 network), you may need to enable assignment of Global addressing:

+
    +
  • iOS clients: not mentioned - likely enabled by default.
  • +
  • +

    Linux clients: execute the command:

    +
    $ docker exec zerotier zerotier-cli set «yourNetworkID» allowGlobal=1
    +
    +

    See change option for an explanation of the output and how to turn the option off.

    +
  • +
  • +

    macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow Assignment of Global IPs".

    +
  • +
  • Android and Windows clients: follow your nose.
  • +
+

Allow Managed Addresses

+

The "Allow Managed Addresses" command (aka allowManaged option) is enabled by default. It gives ZeroTier permission to propagate IP addresses and route assignments. It is not a good idea to turn it off. If you turn it off accidentally, you can re-enable it either in the GUI or via:

+
$ docker exec zerotier zerotier-cli set «yourNetworkID» allowManaged=1
+
+

See change option for an explanation of the output.

+

Useful Commands

+

The commands in this section are given using this syntax:

+
$ zerotier-cli command {argument …}
+
+

When ZeroTier client software is running in a container, you can execute commands:

+
    +
  • +

    directly using docker exec:

    +
$ docker exec zerotier zerotier-cli command {argument …}
    +
    +
  • +
  • +

    or by first opening a shell into the container:

    +
    $ docker exec -it zerotier /bin/ash
+# zerotier-cli command {argument …}
    +# exit
    +$
    +
    +
  • +
+

On macOS you can run the commands from a Terminal window with sudo:

+
$ sudo zerotier-cli command {argument …}
+
+

Windows, presumably, has similar functionality.

+

Networks

+

Check networks

+

To check the ZeroTier networks the client has joined:

+
$ zerotier-cli listnetworks
+200 listnetworks <nwid> <name> <mac> <status> <type> <dev> <ZT assigned ips>
+200 listnetworks 900726788b1df8e2 My_Great_Network 33:b0:c6:2e:ad:2d OK PRIVATE feth4026 10.244.0.1/16
+
+

Join network

+

To join a new ZeroTier network:

+
$ zerotier-cli join «NewNetworkID» 
+
+

Leave network

+

To leave an existing ZeroTier network:

+
$ zerotier-cli leave «ExistingNetworkID» 
+
+

Client status

+

To check the status of a device running ZeroTier client:

+
$ zerotier-cli info
+200 info 340afcaa2a 1.10.1 ONLINE
+
+

Peer status

+

To check the status of peers in your ZeroTier Networks:

+
$ zerotier-cli peers
+200 peers
+<ztaddr>   <ver>  <role> <lat> <link> <lastTX> <lastRX> <path>
+7492fd0dc5 1.10.1 LEAF       2 DIRECT 5407     5407     17.203.229.120/47647
+f14094b92a 1.10.1 LEAF     227 DIRECT 1976     1976     34.209.49.222/54643
+C88262CD64 1.10.1 LEAF       2 DIRECT 5411     5408     192.168.1.70/64408
+
+
+

Tip:

+
    +
  • +

    In the <link> column, DIRECT means ZeroTier has been able to arrange for this client (where you are running the command) and that peer to communicate directly. In other words, the traffic is not being relayed through ZeroTier's servers. Seeing RELAY in this field is not necessarily a bad thing but, to quote from the ZeroTier documentation:

    +
    +

    If you see the peer you're trying to contact in the RELAY state, that means packets are bouncing through our root servers because a direct connection between peers cannot be established. Side effects of RELAYING are increased latency and possible packet loss. See "Router Configuration Tips" above for how to resolve this.

    +
    +
  • +
+

Options

+

At the time of writing, these options are defined:

option          Let ZeroTier …
allowDefault    … modify the system's default route
allowDNS        … modify the system's DNS settings
allowGlobal     … manage IP addresses and Route assignments outside the RFC1918 ranges
allowManaged    … manage IP addresses and Route assignments

Check option

+

To check an option:

+
$ zerotier-cli get «yourNetworkID» «option»
+
+

The result is either "0" (false) or "1" (true). Example:

+
$ zerotier-cli get 900726788b1df8e2 allowDNS
+0
+
+

Change option

+
    +
  • +

    To enable an option:

    +
    $ zerotier-cli set «yourNetworkID» «option»=1
    +
    +
  • +
  • +

    To disable an option:

    +
    $ zerotier-cli set «yourNetworkID» «option»=0
    +
    +
  • +
+

The response to changing an option is a large amount of JSON output. The updated state of the options is near the start. In practice, you can limit the output to just the options with a grep:

+
$ zerotier-cli set 900726788b1df8e2 allowDNS=0 | grep allow
+ "allowDNS": false,
+ "allowDefault": false,
+ "allowGlobal": false,
+ "allowManaged": true,
+
+

About persistent storage

+

Both ZeroTier-client and ZeroTier-router use the same persistent storage area. Should you choose to do so, you can freely switch back and forth between the -client and -router containers without worrying about the persistent storage area.

+

The contents of ZeroTier's persistent storage uniquely identify the client to the ZeroTier Cloud. Unlike WireGuard, it is neither safe nor prudent to copy ZeroTier's persistent storage from one Raspberry Pi to another.

+

An exception to this would be where you actually intend to move a ZeroTier client's identity to a different machine. That will work, providing your migration procedure never results in the same ZeroTier identity being in use on two machines at the same time.

+

You can erase ZeroTier's persistent storage area like this:

+
$ cd ~/IOTstack
+$ docker-compose down {zerotier-client | zerotier-router}
+$ sudo rm -rf ./volumes/zerotier-one
+
+

Tips:

+
    +
  1. always double-check sudo commands before hitting Enter.
  2. +
  3. see also if downing a container doesn't work
  4. +
+

Erasing persistent storage destroys the client's authorisation (cryptographic credentials). If you start the container again, it will construct a new identity and you will need to re-authorise the client in ZeroTier Central. You should also delete the obsolete client authorisation.

+

Container maintenance

+

ZeroTier (either -client or -router) can be kept up-to-date with routine "pulls":

+
$ cd ~/IOTstack
+$ docker-compose pull
+$ docker-compose up -d
+$ docker system prune -f
+
+

iOS tip

+

On iOS, you must decide whether to select "Custom DNS" when you define the VPN. If you want to change your mind, you need to delete the connection and start over.

+
+

Providing you don't delete the Zerotier app, the client's identity remains unchanged so you won't need to re-authorise the client in ZeroTier Central.

+
+

An example of when you might want to enable Custom DNS is if you want your remote clients to use PiHole for name services. If PiHole is running on the same Raspberry Pi as your Zerotier instance, you should use the IP address associated with the Raspberry Pi's interface to the ZeroTier Cloud (ie 10.244.0.1 in the example topologies).

\ No newline at end of file
diff --git a/Containers/Zigbee2MQTT/index.html b/Containers/Zigbee2MQTT/index.html
new file mode 100644
index 000000000..f5bc96094
--- /dev/null
+++ b/Containers/Zigbee2MQTT/index.html
@@ -0,0 +1,2940 @@
+ Zigbee2MQTT - IOTstack

Zigbee2MQTT

+ + +

References

+ +

Definitions

+
    +
  • +

    "compose file" means the file at the path:

    +
    ~/IOTstack/docker-compose.yml
    +
    +
  • +
+

Basic process for new users

+
    +
  1. +

    Run the IOTstack menu and choose both "Mosquitto" and "Zigbee2MQTT". That adds the service definitions for both of those containers to your compose file.

    +
  2. +
  3. +

    Prepare your Zigbee adapter by flashing its firmware.

    +
  4. +
  5. Follow the steps in Identify your Zigbee adapter to work out how your adapter "mounts" on your Raspberry Pi, and edit your compose file to include that information.
  6. +
  7. +

    The default environment variables assume:

    +
      +
    • You are running Mosquitto and Zigbee2MQTT as IOTstack containers on the same computer; and
    • +
    • You want the Zigbee2MQTT web front end to be available on port 8080.
    • +
    +

    This is a good basis for getting started. If it sounds like it will meet your needs, you will not need to make any changes. Otherwise, review the environment variables and make appropriate changes to the service definition in your compose file.

    +
  8. +
  9. +

Bring up your stack:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d
    +
    +
  10. +
  11. +

    Confirm that the Zigbee2MQTT container appears to be working correctly. You should:

    + +
  12. +
  13. +

    Connect to the web front end and start adding your Zigbee devices.

    +
  14. +
+

Prepare your Zigbee adapter

+

Zigbee adapters usually need to be "flashed" before they can be used by Zigbee2MQTT. To prepare your adapter:

+
    +
  1. Go to the supported adapters page.
  2. +
  3. Find your adapter in the list.
  4. +
  5. Follow the instructions for flashing your adapter.
  6. +
+

Note:

+
    +
  • If you can't find your adapter in the list of supported devices, you may not be able to get the Zigbee2MQTT container to connect to it. This kind of problem is outside the scope of IOTstack. You will have to raise the issue with the Zigbee2MQTT project.
  • +
+

Identify your Zigbee adapter

+

This section covers adapters that connect to your Raspberry Pi via USB.

+
+

See connect to a remote adapter for information on connecting to adapters via TCP.

+
+

Many USB Zigbee adapters mount as /dev/ttyACM0 but this is not true for all adapters. In addition, if you have multiple devices connected to your Raspberry Pi that contend for a given device name, there are no guarantees that your Zigbee adapter will always be assigned the same name each time the device list is enumerated.

+

For those reasons, it is better to take the time to identify your Zigbee adapter in a manner that will be predictable, unique and reliable:

+
    +
  1. If your Zigbee adapter is connected to your Raspberry Pi, disconnect it.
  2. +
  3. +

    Run the following command (the option is the digit "1"):

    +
    $ ls -1 /dev/serial/by-id
    +
    +

    The possible response patterns are:

    +
      +
    • +

      An error message:

      +
      ls: cannot access '/dev/serial/by-id': No such file or directory
      +
      +
    • +
    • +

      A list of one or more lines where your Zigbee adapter is not present. Example:

      +
      usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_f068b8e7e82d4b119c0ee71fa1143ea0-if00-port0
      +
      +
    • +
    +

    The actual response (error, or a list of devices) does not matter. You are simply establishing a baseline.

    +
  4. +
  5. +

    Connect your prepared Zigbee adapter to a USB port on your Raspberry Pi.

    +
  6. +
  7. +

    Repeat the same ls command from step 2. The response pattern should be different from step 2. The list should now contain your Zigbee adapter. Example:

    +
    usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_f068b8e7e82d4b119c0ee71fa1143ea0-if00-port0
    +usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00
    +
    +

    The second line indicates a CC2531 adapter is attached to the Raspberry Pi.

    +

    If the response pattern does not change, it means the Raspberry Pi is unable to see your adapter. The two most common reasons are:

    +
      +
    1. Your adapter was not flashed correctly. Start over at prepare your Zigbee adapter.
    2. +
    3. +

      Your adapter does not mount as a serial device. Try repeating steps 2 through 4 with the command:

      +
      $ ls -1 /dev
      +
      +

      to see if you can discover how your adapter attaches to your Raspberry Pi.

      +
      +

      One example is the Electrolama zig-a-zig-ah which attaches as /dev/ttyUSB0.

      +
      +
    4. +
    +
  8. +
  9. +

    Use the output from the ls command in step 4 to form the absolute path to your Zigbee adapter. Example:

    +
    /dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00
    +
    +
  10. +
  11. +

    Check your work like this (the option is the lower-case letter "l"):

    +
    $ ls -l /dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00
    +lrwxrwxrwx 1 root root 13 Mar 31 19:49 dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 -> ../../ttyACM0
    +
    +

    What the output is telling you is that the by-id path is a symbolic link to /dev/ttyACM0. Although this may always be true on your Raspberry Pi, the only part that is actually guaranteed to be true is the by-id path, which is why you should use it.

    +
  12. +
  13. +

    Once you have identified the path to your adapter, you communicate that information to docker-compose like this:

    +
    $ echo ZIGBEE2MQTT_DEVICE_PATH=/dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 >>~/IOTstack/.env
    +
    +

    Note:

    +
      +
    • +

      if you forget to do this step, docker-compose will display the following error message:

      +
      parsing ~/IOTstack/docker-compose.yml: error while interpolating services.zigbee2mqtt.devices.[]: required variable ZIGBEE2MQTT_DEVICE_PATH is missing a value: eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env
      +
      +
    • +
    +
  14. +
  15. +

    Continue from bring up your stack.

    +
  16. +
+

Configuration

+

Environment variables

+

Any value that can be set in a Zigbee2MQTT configuration file can also be set using an environment variable.

+
+

The Zigbee2MQTT documentation explains the syntax.
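As a sketch of the pattern (using a variable already present in the IOTstack service definition): a nested option in configuration.yaml such as

advanced:
  log_symlink_current: true

is equivalent to the environment variable

- ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true

In other words, ZIGBEE2MQTT_CONFIG_ is followed by the upper-cased path to the option.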

+
+

Note:

+
    +
  • Do not use quote marks to enclose the values (right hand sides) of environment variables.
  • +
+

Whenever you change the value of an environment variable, you also need to tell docker-compose to apply the change:

+
$ cd ~/IOTstack
+$ docker-compose up -d zigbee2mqtt
+
+

The default service definition provided with IOTstack includes the following environment variables:

+
    +
  • +

ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883

    +

    Typical values for this are:

    +
      +
    • +

      mqtt://mosquitto:1883

      +

This is the default value supplied with the IOTstack template. It assumes that both Zigbee2MQTT and the Mosquitto broker are running in non-host mode containers on the same Raspberry Pi.

      +
    • +
    • +

      mqtt://localhost:1883

      +

      This would be appropriate if you were to run Zigbee2MQTT in host mode and the Mosquitto broker was running on the same Raspberry Pi.

      +
    • +
    • +

      mqtt://«host-or-ip»:1883

      +

      If the Mosquitto broker is running on a different computer, replace «host-or-ip» with the IP address or domain name of that other computer. You should also remove or comment-out the following lines from the service definition:

      +
      depends_on:
      +  - mosquitto
      +
      +

      The depends_on clause ensures that the Mosquitto container starts alongside the Zigbee2MQTT container. That would not be appropriate if Mosquitto was running on a separate computer.

      +
    • +
    +
  • +
  • +

ZIGBEE2MQTT_CONFIG_FRONTEND=true

    +

    This variable activates the Zigbee2MQTT web interface on port 8080. If you want to change the port number where you access the Zigbee2MQTT web interface, see connecting to the web GUI.

    +
  • +
  • +

ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true

    +

    Defining this variable causes Zigbee2MQTT to create a symlink pointing to the current log folder at the path:

    +
    ~/IOTstack/volumes/zigbee2mqtt/data/log/current
    +
    +

    See Checking the log for more information about why this is useful.

    +
  • +
  • +

DEBUG=zigbee-herdsman*

    +

    Enabling this variable turns on extended debugging inside the container.

    +
  • +
+

Configuration file

+

Zigbee2MQTT creates a default configuration file at the path:

+
~/IOTstack/volumes/zigbee2mqtt/data/configuration.yaml
+
+

Although you can edit the configuration file, the approach recommended for IOTstack is to use environment variables.

+

If you decide to edit the configuration file:

+
    +
1. You will need to use sudo to edit the file (an example command follows this list).
  2. +
  3. +

    After you have finished making changes, you need to inform the running container by:

    +
    $ cd ~/IOTstack
    +$ docker-compose restart zigbee2mqtt
    +
    +
  4. +
  5. +

    Check the log for errors.

    +
  6. +
+
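For step 1 above, a typical invocation (assuming the nano editor, which ships with Raspberry Pi OS) would be:

$ sudo nano ~/IOTstack/volumes/zigbee2mqtt/data/configuration.yaml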

Note:

+
    +
  • If you start Zigbee2MQTT from a clean slate (ie where the configuration file does not exist) and your compose file does not define the … MQTT_SERVER environment variable discussed above, the container will go into a restart loop. This happens because the Zigbee2MQTT container defaults to trying to reach the Mosquitto broker at localhost:1883 instead of mosquitto:1883. That usually fails.
  • +
+

Verifying basic operation

+

Checking status

+
$ docker ps | grep -e mosquitto -e zigbee2mqtt
+NAMES         CREATED          STATUS
+zigbee2mqtt   33 seconds ago   Up 30 seconds
+mosquitto     33 seconds ago   Up 31 seconds (healthy)
+
+
+

The above output is filtered down to the relevant columns

+
+

You are looking for evidence that the container is restarting (ie the "Status" column only ever shows a low number of seconds when compared with the "Created" column).

+

Checking the log

+

You can't use docker logs zigbee2mqtt to inspect the Zigbee2MQTT container's logs. That's because Zigbee2MQTT writes its logging information to the path:

+
~/IOTstack/volumes/zigbee2mqtt/data/log/yyyy-mm-dd.hh-mm-ss/log.txt
+
+

where yyyy-mm-dd.hh-mm-ss is the date and time the container was last started. This means that you have to identify the folder with the latest timestamp before you can inspect the log contained within it.

+

Fortunately, Zigbee2MQTT offers a shortcut. If the … LOG_SYMLINK_CURRENT environment variable is true then the path to the current log will be:

+
~/IOTstack/volumes/zigbee2mqtt/data/log/current/log.txt
+
+

You can use commands like cat and tail to examine the current log. Example:

+
$ cat ~/IOTstack/volumes/zigbee2mqtt/data/log/current/log.txt
+
+
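To watch the log in real time (for example, while pairing a new device), you can also follow it:

$ tail -f ~/IOTstack/volumes/zigbee2mqtt/data/log/current/log.txt

Press Control+C to stop following.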

Checking Mosquitto connectivity

+

To perform this check, you will need to have the Mosquitto clients installed:

+
$ sudo apt install -y mosquitto-clients
+
+

The Mosquitto clients package includes two command-line tools:

+
    +
  • mosquitto_pub for publishing MQTT messages to the broker; and
  • +
  • +

    mosquitto_sub for subscribing to MQTT messages distributed by the broker.

    +
    +

    In IOTstack, the "broker" is usually the Mosquitto container.

    +
    +
  • +
+

Assuming the Mosquitto clients are installed, you can run the following command:

+
$ mosquitto_sub -v -h "localhost" -t "zigbee2mqtt/#" -F "%I %t %p"
+
+

One of two things will happen:

+
    +
  • silence, indicating that the Zigbee2MQTT container is not able to communicate with the Mosquitto container. If this happens, you should check the Zigbee2MQTT log.
  • +
  • chatter, proving that the Zigbee2MQTT container can communicate with the Mosquitto container.
  • +
+

Terminate the mosquitto_sub command with Control+C.
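If you get silence and want to rule out the broker itself, you can publish a test message from a second terminal window. Because the running mosquitto_sub is subscribed to zigbee2mqtt/#, it should echo the message back immediately (the topic suffix and payload here are arbitrary):

$ mosquitto_pub -h "localhost" -t "zigbee2mqtt/test" -m "hello"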

+

Connecting to the web GUI

+

Open a browser, and point it to port 8080 on your Raspberry Pi. For example:

+
http://raspberrypi.local:8080
+
+

You should see the Zigbee2MQTT interface.

+

Notes:

+
    +
  1. +

    The availability of the Zigbee2MQTT UI is governed by an environment variable. If you do not see the UI, check that … FRONTEND is defined.

    +
  2. +
  3. +

    In the URL above, port 8080 is an external port which is exposed via the following port mapping in the Zigbee2MQTT service definition:

    +
    ports:
    +  - "8080:8080"
    +
    +

    If you want to reach the Zigbee2MQTT UI via a different port, you should edit the left hand side of that mapping. For example, if you wanted to use port 10080 you would write:

    +
    ports:
    +  - "10080:8080"
    +
    +

    Do not change the internal port number on the right hand side of the mapping. To apply changes to the port mapping:

    +
    $ cd ~/IOTstack
    +$ docker-compose up -d zigbee2mqtt
    +
    +
  4. +
+

Shell access to the container

+

To open a shell inside the Zigbee2MQTT container, run:

+
$ docker exec -it zigbee2mqtt ash
+
+
+

ash is not a typo!

+
+

To close the shell and leave the container, either type "exit" and press return, or press Control+D.

+

Container maintenance

+

When you become aware of a new version of Zigbee2MQTT on DockerHub, do the following:

+
$ cd ~/IOTstack
+$ docker-compose pull zigbee2mqtt
+$ docker-compose up -d zigbee2mqtt
+$ docker system prune
+
+

In words:

+
    +
  1. Be in the correct directory.
  2. +
  3. The pull compares the version on your Raspberry Pi with the latest version on DockerHub, and downloads any later version.
  4. +
  5. If a newer version is downloaded, the up instantiates a new container based on the new image and performs a new-for-old swap. There is barely any downtime.
  6. +
  7. The prune cleans up the older image.
  8. +
+

You can omit the zigbee2mqtt arguments from the pull and up commands, in which case docker-compose makes an attempt to pull any available updates for all non-Dockerfile-based images, and then instantiates any new images it has downloaded.

+

Service definition change

+

This information is for existing users of the Zigbee2MQTT container.

+

The default IOTstack service definition for Zigbee2MQTT has changed:

+
    +
  • The container no longer needs to be built using a Dockerfile.
  • +
  • The Zigbee2MQTT images on DockerHub can be used "as is".
  • +
  • Environment variables supplied with the updated service definition exactly replicate the purpose of the old Dockerfile.
  • +
  • The Dockerfile supplied with the IOTstack template is deprecated but continues to be provided to maintain backwards compatibility and to avoid introducing a breaking change.
  • +
+

If you were using the Zigbee2MQTT container in IOTstack before April 2022, you should use your favourite text editor to update your compose file to conform with the new service definition.

+
+

You could run the menu, then de-select and re-select Zigbee2MQTT. That will have the effect of applying the updated service definition but it also risks overwriting any other customisations you may have in place. That is why editing your compose file is the recommended approach.

+
+

The updated service definition is included here for ease of reference:

+
zigbee2mqtt:
+  container_name: zigbee2mqtt
+  image: koenkk/zigbee2mqtt:latest
+  environment:
+    - TZ=${TZ:-Etc/UTC}
+    - ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883
+    - ZIGBEE2MQTT_CONFIG_FRONTEND=true
+    - ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true
+    # - DEBUG=zigbee-herdsman*
+  ports:
+    - "8080:8080"
+  volumes:
+    - ./volumes/zigbee2mqtt/data:/app/data
+  devices:
+    - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0"
+  restart: unless-stopped
+  depends_on:
+    - mosquitto
+
+

The changes you should make to your existing Zigbee2MQTT service definition are:

+
    +
  1. +

    Replace the build directive:

    +
    build: ./.templates/zigbee2mqtt/.
    +
    +

    with this image directive:

    +
    image: koenkk/zigbee2mqtt:latest
    +
    +

    This causes IOTstack to use Zigbee2MQTT images "as is" from DockerHub.

    +
  2. +
  3. +

    Add these environment variables:

    +
      - ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883
    +  - ZIGBEE2MQTT_CONFIG_FRONTEND=true
    +  - ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true
    +
    +

    The first two have the same effect as the changes previously made via the Dockerfile. The last variable makes it easier for you to find and view the current log.

    +

    See environment variables for more detail.

    +
  4. +
  5. +

    Add the dependency clause:

    +
    depends_on:
    +  - mosquitto
    +
    +

    This ensures the Mosquitto container is brought up alongside Zigbee2MQTT. The Zigbee2MQTT container goes into a restart loop if Mosquitto is not reachable so this change enforces that business rule. See … MQTT_SERVER for the situation where this might not be appropriate.

    +
  6. +
+

pre-existing configuration file

+

Environment variables in your compose file override corresponding values set in the configuration file at:

+
~/IOTstack/volumes/zigbee2mqtt/data/configuration.yaml
+
+

If you have customised your existing Zigbee2MQTT configuration file, you should review your settings for potential conflicts with the environment variables introduced by the changes to the IOTstack service definition. You can resolve any conflicts either by:

+
    +
  • removing or commenting-out conflicting environment variables; or
  • +
  • altering the environment variable values to match your configuration file.
  • +
+

The second approach is recommended because it minimises the risk that Zigbee2MQTT will go into a restart loop if the configuration file is not present when the container starts.

+

As the Zigbee2MQTT documentation explains, any option that can be set in a configuration file can also be set using an environment variable, so you may want to take the opportunity to implement all your settings as environment variables.
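To illustrate the relationship (the values shown are only examples), the environment variables in the service definition above correspond, roughly, to the following keys in configuration.yaml:

# set by ZIGBEE2MQTT_CONFIG_MQTT_SERVER
mqtt:
  server: mqtt://mosquitto:1883
# set by ZIGBEE2MQTT_CONFIG_FRONTEND
frontend: true
# set by ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT
advanced:
  log_symlink_current: true

If both are present and the values differ, the environment variable wins.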

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/Zigbee2mqttassistant/index.html b/Containers/Zigbee2mqttassistant/index.html new file mode 100644 index 000000000..f1bdaaf0e --- /dev/null +++ b/Containers/Zigbee2mqttassistant/index.html @@ -0,0 +1,2268 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Zigbee2Mqtt Assistant - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Zigbee2Mqtt Assistant

+

References

+ +

About

+

This service is a web frontend which displays Zigbee2Mqtt service messages and is able to control it over MQTT. A working MQTT server is required for this service and must be configured.

+

Environment Parameters

+
    +
  • Z2MA_SETTINGS__MQTTSERVER=mosquitto - The MQTT service instance used by the Zigbee2Mqtt instance. Here, "mosquitto" is the name of the container.
  • +
  • Z2MA_SETTINGS__MQTTUSERNAME=name - Used if your MQTT service has authentication enabled. Optional.
  • +
  • Z2MA_SETTINGS__MQTTPASSWORD=password - Used if your MQTT service has authentication enabled. Optional.
  • +
  • TZ=Etc/UTC - Set to your timezone. Optional but recommended.
  • +
+
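Putting these together, a hypothetical environment clause for the service definition might look like the following (the username and password values are placeholders, not defaults):

  environment:
    - Z2MA_SETTINGS__MQTTSERVER=mosquitto
    - Z2MA_SETTINGS__MQTTUSERNAME=yourMqttUser
    - Z2MA_SETTINGS__MQTTPASSWORD=yourMqttPassword
    - TZ=Etc/UTC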

Accessing the UI

+

The Zigbee2Mqtt Assistant UI is available using port 8880. For example:

+
    +
  • http://your.local.ip.address:8880/
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Containers/images/influxdb2-chart-vs-grafana.jpeg b/Containers/images/influxdb2-chart-vs-grafana.jpeg new file mode 100644 index 000000000..17930479c Binary files /dev/null and b/Containers/images/influxdb2-chart-vs-grafana.jpeg differ diff --git a/Containers/images/influxdb2-explorer.jpeg b/Containers/images/influxdb2-explorer.jpeg new file mode 100644 index 000000000..0e7607a6c Binary files /dev/null and b/Containers/images/influxdb2-explorer.jpeg differ diff --git a/Containers/images/influxdb2-grafana-db-source.jpeg b/Containers/images/influxdb2-grafana-db-source.jpeg new file mode 100644 index 000000000..fd0729503 Binary files /dev/null and b/Containers/images/influxdb2-grafana-db-source.jpeg differ diff --git a/Containers/images/influxdb2-nodered-db-out-node.jpeg b/Containers/images/influxdb2-nodered-db-out-node.jpeg new file mode 100644 index 000000000..428f87883 Binary files /dev/null and b/Containers/images/influxdb2-nodered-db-out-node.jpeg differ diff --git a/Containers/images/influxdb2-nodered-flow-models.jpeg b/Containers/images/influxdb2-nodered-flow-models.jpeg new file mode 100644 index 000000000..59c066431 Binary files /dev/null and b/Containers/images/influxdb2-nodered-flow-models.jpeg differ diff --git a/Containers/images/influxdb2-table1.png b/Containers/images/influxdb2-table1.png new file mode 100644 index 000000000..f3283121e Binary files /dev/null and b/Containers/images/influxdb2-table1.png differ diff --git a/Containers/images/nextcloud-createadminaccount.png b/Containers/images/nextcloud-createadminaccount.png new file mode 100644 index 000000000..ac333bc3c Binary files /dev/null and b/Containers/images/nextcloud-createadminaccount.png differ diff --git a/Containers/images/nextcloud-dashboard.png b/Containers/images/nextcloud-dashboard.png new file mode 100644 index 000000000..255ae4dd8 Binary files /dev/null and b/Containers/images/nextcloud-dashboard.png differ diff --git a/Containers/images/nextcloud-network-model.jpeg b/Containers/images/nextcloud-network-model.jpeg new file mode 100644 index 000000000..2dcc77b49 Binary files /dev/null and b/Containers/images/nextcloud-network-model.jpeg differ diff --git a/Containers/images/nextcloud-networks-clause.jpeg b/Containers/images/nextcloud-networks-clause.jpeg new file mode 100644 index 000000000..4c38e2852 Binary files /dev/null and b/Containers/images/nextcloud-networks-clause.jpeg differ diff --git a/Containers/images/nextcloud-postinitialisation.png b/Containers/images/nextcloud-postinitialisation.png new file mode 100644 index 000000000..6c53a0006 Binary files /dev/null and b/Containers/images/nextcloud-postinitialisation.png differ diff --git a/Containers/images/nextcloud-recommendedapps.png b/Containers/images/nextcloud-recommendedapps.png new file mode 100644 index 000000000..f2ab3c864 Binary files /dev/null and b/Containers/images/nextcloud-recommendedapps.png differ diff --git a/Containers/images/nodered-exec-node-ssh-test.jpeg b/Containers/images/nodered-exec-node-ssh-test.jpeg new file mode 100644 index 000000000..16ba5570f Binary files /dev/null and b/Containers/images/nodered-exec-node-ssh-test.jpeg differ diff --git a/Containers/images/nodered-flow-write-persistent-file.png b/Containers/images/nodered-flow-write-persistent-file.png new file mode 100644 index 000000000..7a5e0a7be Binary files /dev/null and b/Containers/images/nodered-flow-write-persistent-file.png differ diff --git a/Containers/images/nodered-portainer-unused-image.png 
b/Containers/images/nodered-portainer-unused-image.png new file mode 100644 index 000000000..bacce62cb Binary files /dev/null and b/Containers/images/nodered-portainer-unused-image.png differ diff --git a/Containers/images/pgadmin4-connection.jpeg b/Containers/images/pgadmin4-connection.jpeg new file mode 100644 index 000000000..9c6700e68 Binary files /dev/null and b/Containers/images/pgadmin4-connection.jpeg differ diff --git a/Containers/images/pgadmin4-general.jpeg b/Containers/images/pgadmin4-general.jpeg new file mode 100644 index 000000000..49d81fa57 Binary files /dev/null and b/Containers/images/pgadmin4-general.jpeg differ diff --git a/Containers/images/pgadmin4-initial.jpeg b/Containers/images/pgadmin4-initial.jpeg new file mode 100644 index 000000000..c4d0fb1ec Binary files /dev/null and b/Containers/images/pgadmin4-initial.jpeg differ diff --git a/Containers/images/pihole-server-ip-discovery.png b/Containers/images/pihole-server-ip-discovery.png new file mode 100644 index 000000000..12ec58192 Binary files /dev/null and b/Containers/images/pihole-server-ip-discovery.png differ diff --git a/Containers/images/portainer-ce-set-public-ip.png b/Containers/images/portainer-ce-set-public-ip.png new file mode 100644 index 000000000..a16670c4a Binary files /dev/null and b/Containers/images/portainer-ce-set-public-ip.png differ diff --git a/Containers/images/ring-mqtt-token.png b/Containers/images/ring-mqtt-token.png new file mode 100644 index 000000000..ac14f16f5 Binary files /dev/null and b/Containers/images/ring-mqtt-token.png differ diff --git a/Containers/images/wireguard-nattable.png b/Containers/images/wireguard-nattable.png new file mode 100644 index 000000000..7d9f4acd9 Binary files /dev/null and b/Containers/images/wireguard-nattable.png differ diff --git a/Containers/images/wireguard-portmodel.jpeg b/Containers/images/wireguard-portmodel.jpeg new file mode 100644 index 000000000..6a0ab08d5 Binary files /dev/null and b/Containers/images/wireguard-portmodel.jpeg differ diff --git a/Containers/images/zerotier-cgnat-topology-dark.png b/Containers/images/zerotier-cgnat-topology-dark.png new file mode 100644 index 000000000..61708f813 Binary files /dev/null and b/Containers/images/zerotier-cgnat-topology-dark.png differ diff --git a/Containers/images/zerotier-cgnat-topology-light.png b/Containers/images/zerotier-cgnat-topology-light.png new file mode 100644 index 000000000..4a0c8cab0 Binary files /dev/null and b/Containers/images/zerotier-cgnat-topology-light.png differ diff --git a/Containers/images/zerotier-cgnat-wan-interface.jpeg b/Containers/images/zerotier-cgnat-wan-interface.jpeg new file mode 100644 index 000000000..440c80de8 Binary files /dev/null and b/Containers/images/zerotier-cgnat-wan-interface.jpeg differ diff --git a/Containers/images/zerotier-ipv4-ranges.jpeg b/Containers/images/zerotier-ipv4-ranges.jpeg new file mode 100644 index 000000000..13b0320d0 Binary files /dev/null and b/Containers/images/zerotier-ipv4-ranges.jpeg differ diff --git a/Containers/images/zerotier-managed-route-construction.jpeg b/Containers/images/zerotier-managed-route-construction.jpeg new file mode 100644 index 000000000..de2554e83 Binary files /dev/null and b/Containers/images/zerotier-managed-route-construction.jpeg differ diff --git a/Containers/images/zerotier-managed-route-examples.jpeg b/Containers/images/zerotier-managed-route-examples.jpeg new file mode 100644 index 000000000..4b7c8d71c Binary files /dev/null and b/Containers/images/zerotier-managed-route-examples.jpeg differ diff 
--git a/Containers/images/zerotier-topology-1-reachability.jpeg b/Containers/images/zerotier-topology-1-reachability.jpeg new file mode 100644 index 000000000..d14e1b575 Binary files /dev/null and b/Containers/images/zerotier-topology-1-reachability.jpeg differ diff --git a/Containers/images/zerotier-topology-1.jpeg b/Containers/images/zerotier-topology-1.jpeg new file mode 100644 index 000000000..68c80f432 Binary files /dev/null and b/Containers/images/zerotier-topology-1.jpeg differ diff --git a/Containers/images/zerotier-topology-2.jpeg b/Containers/images/zerotier-topology-2.jpeg new file mode 100644 index 000000000..894550ccc Binary files /dev/null and b/Containers/images/zerotier-topology-2.jpeg differ diff --git a/Containers/images/zerotier-topology-3.jpeg b/Containers/images/zerotier-topology-3.jpeg new file mode 100644 index 000000000..9171a2924 Binary files /dev/null and b/Containers/images/zerotier-topology-3.jpeg differ diff --git a/Containers/images/zerotier-topology-4.jpeg b/Containers/images/zerotier-topology-4.jpeg new file mode 100644 index 000000000..3fbe8c11f Binary files /dev/null and b/Containers/images/zerotier-topology-4.jpeg differ diff --git a/Contributing-Services/index.html b/Contributing-Services/index.html new file mode 100644 index 000000000..127a6d61d --- /dev/null +++ b/Contributing-Services/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Custom/index.html b/Custom/index.html new file mode 100644 index 000000000..6fab66946 --- /dev/null +++ b/Custom/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Default-Configs/index.html b/Default-Configs/index.html new file mode 100644 index 000000000..8defd6fb5 --- /dev/null +++ b/Default-Configs/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Developers/BuildStack-RandomPassword/index.html b/Developers/BuildStack-RandomPassword/index.html new file mode 100644 index 000000000..ec83f18b7 --- /dev/null +++ b/Developers/BuildStack-RandomPassword/index.html @@ -0,0 +1,2738 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Build Stack Random Services Password - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Build Stack Random Services Password

+

This page explains how to have a service generate a random password during build time. This will require that your service have a working options menu.

+

Keep in mind that updating strings in a service's yaml config isn't limited to passwords.

+

A word of caution

+

Many services set a password on their initial spin-up and store it internally. That means that if the password is changed by the menu afterwards, the change may not be reflected in the service. By default the password specified in the documentation should be used, unless the user specifically selected to use a randomly generated one. In the future, the ability to specify a password manually may be added, much like how ports can be customised.

+

A basic example

+

Inside the service's service.yml file, a special string can be added for the build script to find and replace. Commonly the string is %randomPassword%, but technically any string can be used. The same string can be used multiple times for the same password to be used multiple times, and/or multiple different strings can be used for multiple passwords. +

  mariadb:
+    image: linuxserver/mariadb
+    container_name: mariadb
+    environment:
+      - MYSQL_ROOT_PASSWORD=%randomAdminPassword%
+      - MYSQL_DATABASE=default
+      - MYSQL_USER=mariadbuser
+      - MYSQL_PASSWORD=%randomPassword%
+

+

These strings will be updated during the Prebuild Hook stage when building. The code to make this happen is shown below.

+

Code commonly used to update passwords

+

This code can basically be copy-pasted into your service's build.py file. You are welcome to expand upon it if required. It will probably be refactored into a utils function in the future to adhere to DRY (Don't Repeat Yourself) practices. +

def preBuild():
+  # Multi-service load. Most services only include a single service. The exception being NextCloud where the database information needs to match between NextCloud and MariaDB (as defined in NextCloud's 'service.yml' file, not IOTstack's MariaDB).
+  with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile:
+    serviceYamlTemplate = yaml.load(objServiceFile)
+
+  oldBuildCache = {}
+  try:
+    with open(r'%s' % buildCache) as objBuildCache: # Load previous build, if it exists
+      oldBuildCache = yaml.load(objBuildCache)
+  except:
+    pass
+
+  buildCacheServices = {}
+  if "services" in oldBuildCache: # If a previous build does exist, load it so that we can reuse the password from it if required.
+    buildCacheServices = oldBuildCache["services"]
+
+  if not os.path.exists(serviceService): # Create the service directory for the service
+    os.makedirs(serviceService, exist_ok=True)
+
+  # Check if buildSettings file exists (from previous build), or create one if it doesn't (in the else block).
+  if os.path.exists(buildSettings):
+    # Password randomisation
+    with open(r'%s' % buildSettings) as objBuildSettingsFile:
+      piHoleYamlBuildOptions = yaml.load(objBuildSettingsFile)
+      if (
+        piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"
+        or piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password every build"
+        or piHoleYamlBuildOptions["databasePasswordOption"] == "Use default password for this build"
+      ):
+
+        if piHoleYamlBuildOptions["databasePasswordOption"] == "Use default password for this build":
+          newAdminPassword = "######" # Update to what's specified in your documentation
+          newPassword = "######" # Update to what's specified in your documentation
+        else:
+          # Generate our passwords
+          newAdminPassword = generateRandomString()
+          newPassword = generateRandomString()
+
+        # Here we loop through each service included in the current service's `service.yml` file and update the password strings.
+        for (index, serviceName) in enumerate(serviceYamlTemplate):
+          dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+          if "environment" in serviceYamlTemplate[serviceName]:
+            for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]):
+              envName = envName.replace("%randomPassword%", newPassword)
+              envName = envName.replace("%randomAdminPassword%", newAdminPassword)
+              dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName
+
+        # If the user had selected to only update the password once, ensure the build options file is updated.
+        if (piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"):
+          piHoleYamlBuildOptions["databasePasswordOption"] = "Do nothing"
+          with open(buildSettings, 'w') as outputFile:
+            yaml.dump(piHoleYamlBuildOptions, outputFile)
+      else: # Do nothing - don't change password
+        for (index, serviceName) in enumerate(buildCacheServices):
+          if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password)
+            dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName]
+          else:
+            dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+
+  # Build options file didn't exist, so create one, and also use default password (default action).
+  else:
+    print("PiHole Warning: Build settings file not found, using default password")
+    time.sleep(1)
+    newAdminPassword = "######" # Update to what's specified in your documentation
+    newPassword = "######" # Update to what's specified in your documentation
+    for (index, serviceName) in enumerate(serviceYamlTemplate):
+      dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+      if "environment" in serviceYamlTemplate[serviceName]:
+        for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]):
+          envName = envName.replace("%randomPassword%", newPassword)
+          envName = envName.replace("%randomAdminPassword%", newAdminPassword)
+          dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName
+      piHoleYamlBuildOptions = {
+        "version": "1",
+        "application": "IOTstack",
+        "service": "PiHole",
+        "comment": "PiHole Build Options"
+      }
+
+    piHoleYamlBuildOptions["databasePasswordOption"] = "Do nothing"
+    with open(buildSettings, 'w') as outputFile:
+      yaml.dump(piHoleYamlBuildOptions, outputFile)
+
+  return True
+

+
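The code above calls generateRandomString() to produce the passwords. Its actual implementation lives in the menu system's shared helpers; a minimal stand-in, assuming nothing more than the Python standard library, could look like this:

import secrets
import string

def generateRandomString(length=32):
  # Illustrative stand-in only: build a random alphanumeric string of the requested length.
  alphabet = string.ascii_letters + string.digits
  return ''.join(secrets.choice(alphabet) for _ in range(length))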

Code for your service's menu

+

While not needed, since the default action is to create a random password, it is a good idea to allow the user to choose what to do. This can be achieved by giving them access to a password menu. The following code can be placed in your service's build.py file; it will show a new menu option, allowing users to select it and be taken to a password settings screen.

+

Remember that you need to have an already working menu, and to place this code into it.

+
import signal
+
+...
+
+def setPasswordOptions():
+  global needsRender
+  global hasRebuiltAddons
+  passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName)
+  with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile:
+    code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec")
+  execGlobals = {
+    "currentServiceName": currentServiceName,
+    "renderMode": renderMode
+  }
+  execLocals = {}
+  screenActive = False
+  exec(code, execGlobals, execLocals)
+  signal.signal(signal.SIGWINCH, onResize)
+  screenActive = True
+  needsRender = 1
+
+...
+
+def createMenu():
+  global yourServicesBuildOptions
+  global serviceService
+
+  yourServicesBuildOptions = []
+  yourServicesBuildOptions.append([
+    "Your Service Password Options",
+    setPasswordOptions
+  ])
+
+  yourServicesBuildOptions.append(["Go back", goBack])
+
+

Password settings screen

+

The code for the Password settings screen is lengthy, but it's pasted here for convenience +

#!/usr/bin/env python3
+
+import signal
+
+def main():
+  from blessed import Terminal
+  from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine
+  from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName
+  import time
+  import subprocess
+  import ruamel.yaml
+  import os
+
+  global signal
+  global currentServiceName
+  global menuSelectionInProgress
+  global mainMenuList
+  global currentMenuItemIndex
+  global renderMode
+  global paginationSize
+  global paginationStartIndex
+  global hideHelpText
+
+  yaml = ruamel.yaml.YAML()
+  yaml.preserve_quotes = True
+
+  try: # If not already set, then set it.
+    hideHelpText = hideHelpText
+  except:
+    hideHelpText = False
+
+  term = Terminal()
+  hotzoneLocation = [((term.height // 16) + 6), 0]
+  paginationToggle = [10, term.height - 25]
+  paginationStartIndex = 0
+  paginationSize = paginationToggle[0]
+
+  serviceService = servicesDirectory + currentServiceName
+  serviceTemplate = templatesDirectory + currentServiceName
+  buildSettings = serviceService + buildSettingsFileName
+
+  def goBack():
+    global menuSelectionInProgress
+    global needsRender
+    menuSelectionInProgress = False
+    needsRender = 1
+    return True
+
+  mainMenuList = []
+
+  hotzoneLocation = [((term.height // 16) + 6), 0]
+
+  menuSelectionInProgress = True
+  currentMenuItemIndex = 0
+  menuNavigateDirection = 0
+
+  # Render Modes:
+  #  0 = No render needed
+  #  1 = Full render
+  #  2 = Hotzone only
+  needsRender = 1
+
+  def onResize(sig, action):
+    global mainMenuList
+    global currentMenuItemIndex
+    mainRender(1, mainMenuList, currentMenuItemIndex)
+
+  def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64):
+    result = ""
+    for i in range(paddingBefore):
+      result += " "
+
+    textPrintableCharactersLength = textLength
+
+    if (textPrintableCharactersLength) == None:
+      textPrintableCharactersLength = len(text)
+
+    result += text
+    remainingSpace = lineLength - textPrintableCharactersLength
+
+    for i in range(remainingSpace):
+      result += " "
+
+    return result
+
+  def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4):
+    global paginationSize
+    selectedTextLength = len("-> ")
+
+    print(term.move(hotzoneLocation[0], hotzoneLocation[1]))
+
+    if paginationStartIndex >= 1:
+      print(term.center("{b}       {uaf}      {uaf}{uaf}{uaf}                                                   {ual}           {b}".format(
+        b=specialChars[renderMode]["borderVertical"],
+        uaf=specialChars[renderMode]["upArrowFull"],
+        ual=specialChars[renderMode]["upArrowLine"]
+      )))
+    else:
+      print(term.center(commonEmptyLine(renderMode)))
+
+    for (index, menuItem) in enumerate(menu): # Menu loop
+      if index >= paginationStartIndex and index < paginationStartIndex + paginationSize:
+        lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore)
+
+        # Menu highlight logic
+        if index == selection:
+          formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0])
+          paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength)
+          toPrint = paddedLineText
+        else:
+          toPrint = '{title}{t.normal}'.format(t=term, title=lineText)
+        # #####
+
+        # Menu check render logic
+        if menuItem[1]["checked"]:
+          toPrint = "     (X) " + toPrint
+        else:
+          toPrint = "     ( ) " + toPrint
+
+        toPrint = "{bv} {toPrint}  {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border
+        toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters)
+        # #####
+        print(toPrint)
+
+    if paginationStartIndex + paginationSize < len(menu):
+      print(term.center("{b}       {daf}      {daf}{daf}{daf}                                                   {dal}           {b}".format(
+        b=specialChars[renderMode]["borderVertical"],
+        daf=specialChars[renderMode]["downArrowFull"],
+        dal=specialChars[renderMode]["downArrowLine"]
+      )))
+    else:
+      print(term.center(commonEmptyLine(renderMode)))
+    print(term.center(commonEmptyLine(renderMode)))
+    print(term.center(commonEmptyLine(renderMode)))
+
+
+  def mainRender(needsRender, menu, selection):
+    global paginationStartIndex
+    global paginationSize
+    term = Terminal()
+
+    if selection >= paginationStartIndex + paginationSize:
+      paginationStartIndex = selection - (paginationSize - 1) + 1
+      needsRender = 1
+
+    if selection <= paginationStartIndex - 1:
+      paginationStartIndex = selection
+      needsRender = 1
+
+    if needsRender == 1:
+      print(term.clear())
+      print(term.move_y(term.height // 16))
+      print(term.black_on_cornsilk4(term.center('IOTstack YourServices Password Options')))
+      print("")
+      print(term.center(commonTopBorder(renderMode)))
+      print(term.center(commonEmptyLine(renderMode)))
+      print(term.center("{bv}      Select Password Option                                                    {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+      print(term.center(commonEmptyLine(renderMode)))
+
+    if needsRender >= 1:
+      renderHotZone(term, needsRender, menu, selection, hotzoneLocation)
+
+    if needsRender == 1:
+      print(term.center(commonEmptyLine(renderMode)))
+      if not hideHelpText:
+        if term.height < 32:
+          print(term.center(commonEmptyLine(renderMode)))
+          print(term.center("{bv}      Not enough vertical room to render controls help text                     {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center(commonEmptyLine(renderMode)))
+        else: 
+          print(term.center(commonEmptyLine(renderMode)))
+          print(term.center("{bv}      Controls:                                                                 {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center("{bv}      [Space] to select option                                                  {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center("{bv}      [Up] and [Down] to move selection cursor                                  {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center("{bv}      [H] Show/hide this text                                                   {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center("{bv}      [Enter] to build and save option                                          {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center("{bv}      [Escape] to cancel changes                                                {bv}".format(bv=specialChars[renderMode]["borderVertical"])))
+          print(term.center(commonEmptyLine(renderMode)))
+          print(term.center(commonEmptyLine(renderMode)))
+      print(term.center(commonBottomBorder(renderMode)))
+
+  def runSelection(selection):
+    import types
+    if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType):
+      mainMenuList[selection][1]()
+    else:
+      print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0])))
+
+  def isMenuItemSelectable(menu, index):
+    if len(menu) > index:
+      if len(menu[index]) > 1:
+        if "skip" in menu[index][1] and menu[index][1]["skip"] == True:
+          return False
+    return True
+
+  def loadOptionsMenu():
+    global mainMenuList
+    mainMenuList.append(["Use default password for this build", { "checked": True }])
+    mainMenuList.append(["Randomise database password for this build", { "checked": False }])
+    mainMenuList.append(["Randomise database password every build", { "checked": False }])
+    mainMenuList.append(["Do nothing", { "checked": False }])
+
+  def checkMenuItem(selection):
+    global mainMenuList
+    for (index, menuItem) in enumerate(mainMenuList):
+      mainMenuList[index][1]["checked"] = False
+
+    mainMenuList[selection][1]["checked"] = True
+
+  def saveOptions():
+    try:
+      if not os.path.exists(serviceService):
+        os.makedirs(serviceService, exist_ok=True)
+
+      if os.path.exists(buildSettings):
+        with open(r'%s' % buildSettings) as objBuildSettingsFile:
+          yourServices = yaml.load(objBuildSettingsFile)
+      else:
+        yourServices = {
+          "version": "1",
+          "application": "IOTstack",
+          "service": "Your Service",
+          "comment": "Your Service Build Options"
+        }
+
+      yourServices["databasePasswordOption"] = ""
+
+      for (index, menuOption) in enumerate(mainMenuList):
+        if menuOption[1]["checked"]:
+          yourServices["databasePasswordOption"] = menuOption[0]
+          break
+
+      with open(buildSettings, 'w') as outputFile:
+        yaml.dump(yourServices, outputFile)
+
+    except Exception as err: 
+      print("Error saving Your Services Password options", currentServiceName)
+      print(err)
+      return False
+    global hasRebuiltHardwareSelection
+    hasRebuiltHardwareSelection = True
+    return True
+
+  def loadOptions():
+    try:
+      if not os.path.exists(serviceService):
+        os.makedirs(serviceService, exist_ok=True)
+
+      if os.path.exists(buildSettings):
+        with open(r'%s' % buildSettings) as objBuildSettingsFile:
+          yourServicesYamlBuildOptions = yaml.load(objBuildSettingsFile)
+
+        for (index, menuOption) in enumerate(mainMenuList):
+          if menuOption[0] == yourServicesYamlBuildOptions["databasePasswordOption"]:
+            checkMenuItem(index)
+            break
+
+    except Exception as err: 
+      print("Error loading Your Services Password options", currentServiceName)
+      print(err)
+      return False
+    return True
+
+
+  if __name__ == 'builtins':
+    global signal
+    term = Terminal()
+    signal.signal(signal.SIGWINCH, onResize)
+    loadOptionsMenu()
+    loadOptions()
+    with term.fullscreen():
+      menuNavigateDirection = 0
+      mainRender(needsRender, mainMenuList, currentMenuItemIndex)
+      menuSelectionInProgress = True
+      with term.cbreak():
+        while menuSelectionInProgress:
+          menuNavigateDirection = 0
+
+          if not needsRender == 0: # Only rerender when changed to prevent flickering
+            mainRender(needsRender, mainMenuList, currentMenuItemIndex)
+            needsRender = 0
+
+          key = term.inkey(esc_delay=0.05)
+          if key.is_sequence:
+            if key.name == 'KEY_TAB':
+              if paginationSize == paginationToggle[0]:
+                paginationSize = paginationToggle[1]
+              else:
+                paginationSize = paginationToggle[0]
+              mainRender(1, mainMenuList, currentMenuItemIndex)
+            if key.name == 'KEY_DOWN':
+              menuNavigateDirection += 1
+            if key.name == 'KEY_UP':
+              menuNavigateDirection -= 1
+            if key.name == 'KEY_ENTER':
+              if saveOptions():
+                return True
+              else:
+                print("Something went wrong. Try saving the list again.")
+            if key.name == 'KEY_ESCAPE':
+              menuSelectionInProgress = False
+              return True
+          elif key:
+            if key == ' ': # Space pressed
+              checkMenuItem(currentMenuItemIndex) # Update checked list
+              needsRender = 2
+            elif key == 'h': # H pressed
+              if hideHelpText:
+                hideHelpText = False
+              else:
+                hideHelpText = True
+              mainRender(1, mainMenuList, currentMenuItemIndex)
+
+          if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item
+            currentMenuItemIndex += menuNavigateDirection
+            currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList)
+            needsRender = 2
+
+            while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex):
+              currentMenuItemIndex += menuNavigateDirection
+              currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList)
+    return True
+
+  return True
+
+originalSignalHandler = signal.getsignal(signal.SIGINT)
+main()
+signal.signal(signal.SIGWINCH, originalSignalHandler)
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Developers/BuildStack-Services/index.html b/Developers/BuildStack-Services/index.html new file mode 100644 index 000000000..3748dd237 --- /dev/null +++ b/Developers/BuildStack-Services/index.html @@ -0,0 +1,2442 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Build Stack Services system - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Build Stack Services system

+

This page explains how the build stack system works for developers.

+

How to define a new service

+

A service only requires 2 files:
* service.yml - Contains data for docker-compose
* build.py - Contains logic that the menu system uses.

+

A basic service

+

Inside the service.yml is where the service data for docker-compose is housed, for example: +

adminer:
+  container_name: adminer
+  image: adminer
+  restart: unless-stopped
+  ports:
+    - "9080:8080"
+
+It is important that the service name matches the directory that it's in - that means that the adminer service must be placed into a folder called adminer inside the ./.templates directory.

+

Basic build code for service

+

At the very least, the build.py requires the following code: +

#!/usr/bin/env python3
+
+issues = {} # Returned issues dict
+buildHooks = {} # Options, and others hooks
+haltOnErrors = True
+
+# Main wrapper function. Required to make local vars work correctly
+def main():
+  global currentServiceName # Name of the current service
+
+  # This lets the menu know whether to put " >> Options " or not
+  # This function is REQUIRED.
+  def checkForOptionsHook():
+    try:
+      buildHooks["options"] = callable(runOptionsMenu)
+    except:
+      buildHooks["options"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForPreBuildHook():
+    try:
+      buildHooks["preBuildHook"] = callable(preBuild)
+    except:
+      buildHooks["preBuildHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForPostBuildHook():
+    try:
+      buildHooks["postBuildHook"] = callable(postBuild)
+    except:
+      buildHooks["postBuildHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForRunChecksHook():
+    try:
+      buildHooks["runChecksHook"] = callable(runChecks)
+    except:
+      buildHooks["runChecksHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # Entrypoint for execution
+  if haltOnErrors:
+    eval(toRun)()
+  else:
+    try:
+      eval(toRun)()
+    except:
+      pass
+
+# This check isn't required, but placed here for debugging purposes
+global currentServiceName # Name of the current service
+if currentServiceName == 'adminer': # Make sure you update this.
+  main()
+else:
+  print("Error. '{}' Tried to run 'adminer' config".format(currentServiceName))
+
+This code doesn't have any port conflict checking or menu code in it, and just allows the service to be built as is. The best way to learn how to extend the functionality of a service's build script is to look at the other services' build scripts. You can also check out the advanced sections on adding menus and checking for issues for a deeper explanation of specific situations.

+

Basic code for a service that uses bash

+

If Python isn't your thing, here's a code blob you can copy and paste. Just be sure to update the lines where the comments start with --- +

#!/usr/bin/env python3
+
+issues = {} # Returned issues dict
+buildHooks = {} # Options, and others hooks
+haltOnErrors = True
+
+# Main wrapper function. Required to make local vars work correctly
+def main():
+  import subprocess
+  global dockerComposeServicesYaml # The loaded memory YAML of all checked services
+  global toRun # Switch for which function to run when executed
+  global buildHooks # Where to place the options menu result
+  global currentServiceName # Name of the current service
+  global issues # Returned issues dict
+  global haltOnErrors # Turn on to allow erroring
+
+  from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory, servicesFileName
+
+  # runtime vars
+  serviceVolume = volumesDirectory + currentServiceName # Unused in example
+  serviceService = servicesDirectory + currentServiceName # Unused in example
+  serviceTemplate = templatesDirectory + currentServiceName
+
+  # This lets the menu know whether to put " >> Options " or not
+  # This function is REQUIRED.
+  def checkForOptionsHook():
+    try:
+      buildHooks["options"] = callable(runOptionsMenu)
+    except:
+      buildHooks["options"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForPreBuildHook():
+    try:
+      buildHooks["preBuildHook"] = callable(preBuild)
+    except:
+      buildHooks["preBuildHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForPostBuildHook():
+    try:
+      buildHooks["postBuildHook"] = callable(postBuild)
+    except:
+      buildHooks["postBuildHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # This function is REQUIRED.
+  def checkForRunChecksHook():
+    try:
+      buildHooks["runChecksHook"] = callable(runChecks)
+    except:
+      buildHooks["runChecksHook"] = False
+      return buildHooks
+    return buildHooks
+
+  # This service will not check anything unless this is set
+  # This function is optional, and will run each time the menu is rendered
+  def runChecks():
+    checkForIssues()
+    return []
+
+  # This function is optional, and will run after the docker-compose.yml file is written to disk.
+  def postBuild():
+    return True
+
+  # This function is optional, and will run just before the build docker-compose.yml code.
+  def preBuild():
+    execComm = "bash {currentServiceTemplate}/build.sh".format(currentServiceTemplate=serviceTemplate) # --- You may want to change this
+    print("[Wireguard]: ", execComm) # --- Ensure to update the service name with yours
+    subprocess.call(execComm, shell=True) # This is where the magic happens
+    return True
+
+  # #####################################
+  # Supporting functions below
+  # #####################################
+
+  def checkForIssues():
+    return True
+
+  if haltOnErrors:
+    eval(toRun)()
+  else:
+    try:
+      eval(toRun)()
+    except:
+      pass
+
+# This check isn't required, but placed here for debugging purposes
+global currentServiceName # Name of the current service
+if currentServiceName == 'wireguard': # --- Ensure to update the service name with yours
+  main()
+else:
+  print("Error. '{}' Tried to run 'wireguard' config".format(currentServiceName)) # --- Ensure to update the service name with yours
+

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Developers/Git-Setup/index.html b/Developers/Git-Setup/index.html new file mode 100644 index 000000000..fc74fd4f1 --- /dev/null +++ b/Developers/Git-Setup/index.html @@ -0,0 +1,2494 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Git Setup - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Git Setup

+ +

How to setup and use git for IOTstack development.

+
    +
  1. First, create a fork of SensorsIot/IOTstack on github, and set up your ssh-keys.
  2. +
  3. Clone your fork and set up your github username and email +
    $ git clone git@github.com:<username>/IOTstack.git
    +$ cd IOTstack
    +$ git config user.name <username>
    +$ git config user.email <1234>+<username>@users.noreply.github.com
    +
  4. +
  5. Add the SensorsIot/IOTstack upstream +
    $ git remote add upstream https://github.com/SensorsIot/IOTstack.git
    +
  6. +
  7. Configure for ease of operation +
    $ git config fetch.prune true
    +$ git config remote.pushDefault origin
    +$ git config --add remote.origin.fetch "^refs/heads/gh-pages"
    +$ git config --add remote.upstream.fetch "^refs/heads/gh-pages"
    +$ git config branch.master.mergeoptions "--no-ff"
    +$ git config fetch.parallel 0
    +$ git fetch --all
    +
  8. +
+

Make a pull-request

+
flowchart LR
+  upstream["upstream (SensorsIOT)"] -- "1. git fetch + git checkout -b"
+    --> local[local branch]
+  local -- "2. git commit" --> local
+  local -- "3. git push" --> origin["origin (your fork)"]
+  origin -- "3. create github pull-request" --> upstream
+

Please see Contributing for instructions on how to write commit messages.

+

$ git fetch upstream
+$ git checkout -b <your-descriptive-branch-name> upstream/master
+...coding and testing...
+$ git add <your new or changed file>
+Check everything has been added:
+$ git status
+$ git commit
+$ git push
+
+When you execute git push, its output should have a link for creating the pull-request to github.

+

Common operations

+

Show compact history with "git lg"

+
$ git config alias.lg "log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
+
+

Remove branches of merged pull-requests.

+

When your pull-requests have been merged, their branches aren't needed anymore. Remove them to reduce clutter and distractions. The master branch is never deleted.

+
$ git fetch --all
+$ git checkout master
+$ git branch -r --merged upstream/master | \
+    grep -v origin/master$ | grep origin | sed 's/origin\///' | \
+    xargs -I 'B' git push --delete origin B
+$ git branch --merged upstream/master | grep -v "  master$" | \
+    xargs -I'B' git branch -d B
+
+

Advanced topics

+

Fetch all pull-requests as branches

+

This is handy for easily testing out other persons' suggested changes. The branches are of course fetch-only, and you can't push your own commits to them.

+
$ git config --add remote.upstream.fetch +refs/pull/*/head:refs/remotes/upstream/pr-*
+$ git fetch upstream
+
+

Note: Everything below requires this.

+

Show up-to-date branches not merged

+

Branches that include the latest upstream/master, but are not merged to your current branch, are potentially mergeable pull-requests. This is useful for identifying which pull-requests you should be able to merge without conflict.

+
$ git fetch upstream
+$ git branch -r --contains upstream/master --no-merged upstream/master
+
+

Check pull-requests on Github can be merged without conflicts

+

In git, the only way to know if a branch can be merged without a conflict is by actually doing the merge. An alias to (re-)create a branch named merge-test and do merges into it:

+
$ git config alias.test-pull-request-merge $'!f() { : git merge && \
+    OPENPULLS=$(curl -s \'https://api.github.com/repos/SensorsIot/IOTstack/pulls?base=master&per_page=100\' | \
+        grep "^.....number" | sed -E \'s/.* ([0-9]+),/  upstream\\/pr-\\1/\') && \
+    git fetch upstream && git checkout -B merge-test upstream/master && \
+    git branch -r --contains upstream/master --no-merged upstream/master | \
+    grep upstream/pr- | sort - <(echo "$OPENPULLS") | \
+    { uniq -d; [[ "$1" ]] && echo "$1"; } | \
+    xargs -I B sh -c "echo Merging B && \
+        git merge --no-rerere-autoupdate --no-ff --quiet B || \
+        { echo ***FAILED TO MERGE B && exit 255; };" ;}; f'
+
+ + +

Then use this alias combined with git checkout -, returning your working copy back to the original branch if all merges succeeded:

+
$ git test-pull-request-merge && git checkout -
+
+

This merges all branches that are: a) currently open pull-requests, b) up-to-date, i.e. contain upstream/master, c) not merged already, and d) the optionally provided argument. Note: this won't ignore draft pull-requests. If it encounters a failure, it stops immediately to let you inspect the conflict.

+
+

Failed merge?

+

If there was a merge-conflict, inspect it e.g. using git diff, but don't do any real work or conflict resolution in the merge-test branch. When you have understood the merge-conflict and want to leave the merge-test branch, abort the failed merge and switch to your actual branch:

+
$ git diff
+$ git merge --abort
+$ git checkout <your-PR-branch-that-resulted-in-the-conflict>
+
+
+

Check your branch doesn't conflict with any existing pull-request

+

When you intend to submit a pull-request you might want to check that it won't conflict with any of the existing pull-requests.

+
    +
  1. Commit all your changes into your pull request branch.
  2. +
  3. +

    Use the alias from the previous "Check pull-requests on Github can be merged without conflicts" topic to test merging your branch in addition to all current pull-requests:

    +
    $ git test-pull-request-merge <your-pull-request-branch> && git checkout -
    +
    +

    If there is a merge-conflict, see "Failed merge?" above.

    +
  4. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Developers/Menu-System/index.html b/Developers/Menu-System/index.html new file mode 100644 index 000000000..30bc7b904 --- /dev/null +++ b/Developers/Menu-System/index.html @@ -0,0 +1,2535 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Menu system - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Menu system

+

This page explains how the menu system works for developers.

+

Background

+

Originally this script was written in bash. After a while it became obvious that bash wasn't well suited to dealing with all the different types of configuration files, and the logic that goes with configuring everything. IOTstack needs to be accessible to all levels of programmers and tinkerers, not just ones experienced with Linux and bash. For this reason, it was rewritten in Python since the language syntax is easier to understand, and is more commonly used for scripting and programming than bash. Bash is still used in IOTstack where it makes sense to use it, but the menu system itself uses Python. The code itself, while not the most well-structured or efficient, was intentionally written that way so that both beginners and experienced programmers could contribute to the project. We are always open to improvements if you have suggestions.

+ +

Each screen of the menu is its own Python script. You can find most of these in the ./scripts directory. When you select an item from the menu, and it changes screens, it actually dynamically loads and executes that Python script. It passes data as required by placing it into the global variable space so that both the child and the parent script can access it.

+

Injecting and getting globals in a child script

+
with open(childPythonScriptPath, "rb") as pythonDynamicImportFile:
+  code = compile(pythonDynamicImportFile.read(), childPythonScriptPath, "exec")
+execGlobals = {
+  "globalKeyName": "globalKeyValue"
+}
+execLocals = {}
+print(execGlobals["globalKeyName"]) # Will print out 'globalKeyValue'
+exec(code, execGlobals, execLocals)
+print(execGlobals["globalKeyName"]) # Will print out 'newValue'
+
+

Reading and writing global variables in a child script

+
def someFunction():
+  global globalKeyName
+  print(globalKeyName) # Will print out 'globalKeyValue'
+  globalKeyName = "newValue"
+
+

Each menu is its own Python executable. The entry point is at the bottom of the file, wrapped in a main() function to prevent variable scope creep.

+

The code at the bottom of the main() function: +

if __name__ == 'builtins':
+

+

is actually where the execution path runs; all the code above it is just declared so that it can be called without ordering or scope issues.

+

Optimisations

+

It was obvious early on that the menu system would be slow on lower-end devices, such as the Raspberry Pi, especially if it were rendering a 4K terminal screen from a desktop via SSH. To mitigate this issue, not all of the screen is redrawn when there is a change. A "Hotzone", as it's called in the code, is usually all that is rerendered when there's a change (such as pressing up or down to change an item selection, but not when scrolling). Full screen redraws are expensive and are only used when required, for example, when scrolling the pagination, selecting or deselecting a service, expanding or collapsing the menu and so on.

+

Environments and encoding

+

At the very beginning of the main menu screen (./scripts/main_menu.py) the function checkRenderOptions() is run to determine what characters can be displayed on the screen. It will try various character sets, and eventually default to ASCII if none of the fancier ones can be rendered. This setting is passed into each of the submenus through the submenu's global variables so that they don't have to recheck when they load.

+ +

From the main screen, you will see several sections leading to various submenus. Most of these menus work in the same way as the main menu. The only exception to this rule is the Build Stack menu, which is probably the most complex part of IOTstack.

+

Build Stack Menu

+

Path: ./scripts/buildstack_menu.py

+

Loading

+
    +
  1. Upon loading, the Build Stack menu will get a list of folders inside the ./.templates directory and check for a build.py file inside each of them. This can be seen in the generateTemplateList() function, which is executed before the first rendering happens.
  2. +
  3. The menu will then check if the file ./services/docker-compose.save.yml exists. This file is used to save the configuration of the last build. This happens in the loadCurrentConfigs() function. It is important that the service name in the compose file matches the folder name; any service that doesn't will either cause an error or won't be loaded into the menu.
  4. +
  5. If a previous build did exist, the menu will then run the prepareMenuState() function, which checks which items should be ticked, and checks for any issues with the ticked items by running checkForIssues().
  6. +
+
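The scan described in the first step boils down to checking each template folder for a build.py file. A simplified sketch of the idea (not the actual generateTemplateList() code):

import os

def listBuildableTemplates(templatesDirectory='./.templates'):
  # A folder inside ./.templates is treated as a buildable service if it contains a build.py file.
  templates = []
  for entry in sorted(os.listdir(templatesDirectory)):
    if os.path.exists(os.path.join(templatesDirectory, entry, 'build.py')):
      templates.append(entry)
  return templates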

Selection and deselection

+

When an item is selected, 3 things happen:
1. The UI variable (menu) is updated with the function checkMenuItem(selectionIndex) to let the user know the current state.
2. The array holding every checked item is updated with setCheckedMenuItems(). It uses the UI variable (menu) to know which items are set.
3. Any issues with the new list of selected items are checked for by running checkForIssues().

+

Check for options (submenus of services)

+

During a full render sequence (this is not a hotzone render), the build stack menu checks to see if each of the services has an options menu. It does this by executing the build.py script of each of the services and passing checkForOptionsHook into the toRun global variable property to see if the script has a runOptionsMenu function. If the service's function result is true, without error, then the options text will appear for that menu item.

+
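Conceptually, this reuses the dynamic-import pattern shown earlier: the menu compiles the service's build.py, injects toRun into its globals and then reads back the resulting buildHooks. A simplified sketch, not the exact menu code:

def serviceHasOptionsMenu(serviceName):
  buildScriptPath = "./.templates/{}/build.py".format(serviceName)
  with open(buildScriptPath, "rb") as pythonDynamicImportFile:
    code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec")
  execGlobals = {
    "currentServiceName": serviceName,
    "toRun": "checkForOptionsHook"
  }
  exec(code, execGlobals)  # one dict for globals and locals, so the child's top-level names land in execGlobals
  return execGlobals.get("buildHooks", {}).get("options", False)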

Check for issues

+

When a service is selected or deselected on the menu, the checkForIssues() function is run. This function iterates through each of the selected menu items' folders, executing the build.py script and passing checkForRunChecksHook into the toRun global variable property to see if the script has a runChecks function. The runChecks function is different depending on the service, since each service has its own requirements. Generally though, the runChecks function should check for port conflicts against any of the other services that are enabled. The menu will still allow you to build the stack even if issues are present, assuming no errors are raised during the build process.

+
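As an illustration of the kind of check involved (the names here are illustrative; real services use shared helper functions), a port test inside runChecks might look something like:

def checkForIssues():
  # dockerComposeServicesYaml, currentServiceName and issues are the globals
  # injected by the menu, as described above.
  myPorts = [str(p).split(":")[0] for p in dockerComposeServicesYaml[currentServiceName].get("ports", [])]
  for (serviceName, serviceDefinition) in dockerComposeServicesYaml.items():
    if serviceName == currentServiceName:
      continue
    for mapping in serviceDefinition.get("ports", []):
      externalPort = str(mapping).split(":")[0]
      if externalPort in myPorts:
        issues["portConflicts"] = "Port {} is also used by {}".format(externalPort, serviceName)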

Prebuild hook

+

Pressing enter on the Build Stack menu kicks off the build process. The Build Stack menu will execute the runPrebuildHook() function. This function iterates through each of the selected menu items' folders, executing the build.py script and passing checkForPreBuildHook into the toRun global variable property to see if the script has a preBuild function. The preBuild function is different depending on the service, since each service has its own requirements. Some services may not even use the prebuild hook. The prebuild hook is very useful for setting up a service's configuration, however. For example, it can be used to autogenerate a password for a particular service, or to copy and modify a configuration file from the ./.templates directory into the ./services or ./volumes directory.

+

Postbuild hook

+

The Build Stack menu will execute the runPostBuildHook() function in the final step of the build process, after the docker-compose.yml file has been written to disk. This function iterates through each of the selected menu items' folders executing the build.py script and passing in checkForPostBuildHook into the toRun global variable property to see if the script has a postBuild function. The postBuild function is different depending on the service, since each service has its own requirements. Most services won't require this function, but it can be useful for cleaning up temporary files and so on.

+

The build process

+

The selected services' yaml configuration is already loaded into memory before the build stack process is started.

+
    +
  1. Run prebuildHooks.
  2. +
  3. Read the ./.templates/docker-compose-base.yml file into an in-memory yaml structure.
  4. +
  5. Add selected services into the in memory structure.
  6. +
  7. If it exists, merge the ./compose-override.yml file into the in-memory structure.
  8. +
  9. Write the in-memory YAML structure to disk as ./docker-compose.yml.
  10. +
  11. Run postbuildHooks.
  12. +
  13. Run postbuild.sh if it exists, with the list of services built.
  14. +
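The sketch below illustrates the middle steps of this list (reading the base file, adding the selected services, merging compose-override.yml and writing docker-compose.yml) using PyYAML, which the new menu requires. The per-service file layout and the shallow merge are assumptions; the real menu also applies per-service options, issue checks and the hooks described above, so treat this as an illustration rather than the actual implementation.

# Simplified sketch of the build process (not the actual menu implementation).
import yaml
from pathlib import Path

def build_compose(selected_services):
    compose = yaml.safe_load(Path(".templates/docker-compose-base.yml").read_text())
    compose.setdefault("services", {})

    # Add each selected service's fragment. The per-service file name and
    # layout (a 'services:' mapping) are assumed here.
    for name in selected_services:
        fragment = yaml.safe_load(Path(".templates/%s/service.yml" % name).read_text())
        compose["services"].update(fragment.get("services", fragment))

    # Shallow-merge compose-override.yml if present (the real merge is deeper).
    override_path = Path("compose-override.yml")
    if override_path.exists():
        for key, value in (yaml.safe_load(override_path.read_text()) or {}).items():
            if isinstance(value, dict):
                compose.setdefault(key, {}).update(value)
            else:
                compose[key] = value

    # Write the merged structure to disk.
    Path("docker-compose.yml").write_text(yaml.safe_dump(compose, sort_keys=False))

if __name__ == "__main__":
    build_compose(["nodered", "mosquitto"])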
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Developers/PostBuild-Script/index.html b/Developers/PostBuild-Script/index.html new file mode 100644 index 000000000..3e8debd99 --- /dev/null +++ b/Developers/PostBuild-Script/index.html @@ -0,0 +1,2272 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Postbuild BASH Script - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Postbuild BASH Script

+

The postbuild bash script allows arbitrary bash commands to be executed after the stack has been built.

+

How to use

+

Place a file in the main directory called postbuild.sh. When the buildstack build logic finishes, it'll execute the postbuild.sh script, passing in each service selected from the buildstack menu as a parameter. This script is run each time the buildstack logic runs.

+

Updates

+

The postbuild.sh file has been added to .gitignore, so it won't be overwritten when IOTstack itself is updated. It has also been added to the backup script so that it will be included in your personal IOTstack backups.

+

Example postbuild.sh script

+

The following script will print out each of the services built, plus a custom message for nodered. If it is the first time the script has been executed, it will also output "Fresh Install!" at the end, using a .install_tainted marker file to keep track.

#!/bin/bash
+
+for iotstackService in "$@"
+do
+  echo "$iotstackService"
+  if [ "$iotstackService" == "nodered" ]; then
+    echo "NodeRed Installed!"
+  fi
+done
+
+if [ ! -f .install_tainted ]; then
+  echo "Fresh Install!"
+  touch .install_tainted
+fi
+

+

What is my purpose?

+

The postbuild script can be used to run custom bash commands, such as moving files, or issuing commands that your services expect to be completed before running.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Developers/index.html b/Developers/index.html new file mode 100644 index 000000000..e2d34876b --- /dev/null +++ b/Developers/index.html @@ -0,0 +1,2357 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Contributing - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Contributing

+

We welcome pull-requests.

+

For larger contributions, please open an issue describing your idea. This may provide valuable discussion and feedback, and it prevents the unfortunate case of two people working on the same thing. There's no need to wait for any approval.

+
+

Development guidelines

+
    +
  • It-just-works - use good defaults that will work well for a first time user
  • +
  • Keep-it-simple - try to keep stuff beginner-friendly and don't go too + deep into advanced topics
  • +
+
+

Writing documentation

+
+

Tip

+

For simple changes you can straight-up just use the edit link available on every documentation page. It's the pen icon to the right of the top heading. Write your changes, check in the preview tab that everything looks as expected, and submit them as proposed changes.

+
+

Documentation is written as markdown, processed using mkdocs (docs) and the Material theme (docs). The Material theme is not just styling, but provides additional syntax extensions.

+

To test your local changes while writing them and before making a pull-request, +start a local mkdocs server: +

$ ~/IOTstack/scripts/development/mkdocs-serve.sh
+
+And then open http://127.0.0.1:8000/ in a browser.

+

Creating a new service

+

In this section you can find information on how to contribute a service to IOTstack. We are generally very accepting of new services where they are useful. Keep in mind that if a service is not IOTstack-, self-hosted- or automation-related, we may not approve the PR.

+

As the number of services grows over time, we may split the Build Stack menu into subsections or add filters to make the services we provide easier to find.

+

Checks

+
    +
  • service.yml file is correct
  • +
  • build.py file is correct
  • +
  • The service allows changing the external WUI port from the Build Stack options menu if it uses an HTTP/S port
  • +
  • Use a default password, or allow the user to generate a random password for the service for the initial installation. If the service asks the user to set up an account on first run, this requirement can be ignored.
  • +
  • Ensure Default Configs is updated as required. A helper script (default_ports_md_generator.sh) exists to simplify this.
  • +
  • Must detect port conflicts with other services on BuildStack Menu.
  • +
  • Pre and Post hooks work with no errors.
  • +
  • Does not require the user to edit config files in order to get the service running.
  • +
  • Ensure that your service can be backed up and restored without errors or data loss.
  • +
  • Any configs that are required before getting the service running should be configured in the service's options menu (and a BuildStack menu Issue should be displayed if not).
  • +
  • Fork the repo and push your changes to your fork. Then create a cross-repo PR for the maintainers to review. We may request additional changes from you.
  • +
+

Commit message

+
service_name: Add/Fix/Change feature or bug summary
+
+Optional longer description of the commit. What is changed and why it
+is changed. Wrap at 72 characters.
+
+* You can use markdown formating as this will automatically be the
+  description of your pull-request.
+* End by adding any issues this commit fixes, one per line:
+
+Fixes #1234
+Fixes #4567
+
+
    +
  1. +

    The first line is a short description. Keep it short; aim for 50 characters. This is like the subject of an email. It shouldn't try to fully or uniquely describe what the commit does. More importantly, it should aim to inform why this commit was made.

    +

    service_name - the service or project-part being changed, e.g. influxdb, grafana, docs. Documentation changes should use the name of the service. Use docs if the change is to general documentation. If all else fails, use the folder-name of the file you are changing. Use lowercase.

    +

    Add/Fix/Change - what type of change this commit is. Capitalized.

    +

    feature or bug summary - free very short text giving an idea of why/what.

    +
  2. +
  3. +

    Empty line.

    +
  4. +
  5. +

    A longer description of what and why. Wrapped to 72 characters.

    +

    Use GitHub issue linking to automatically close issues when the pull-request for this commit is merged.

    +
  6. +
+

For tips on how to use git, see Git Setup.

+

Follow up

+

If your new service is approved and merged then congratulations! Please watch the Issues page on github over the next few days and weeks to see if any users have questions or issues with your new service.

+

Links:

+ + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Docker-commands/index.html b/Docker-commands/index.html new file mode 100644 index 000000000..170e0523b --- /dev/null +++ b/Docker-commands/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Getting-Started/index.html b/Getting-Started/index.html new file mode 100644 index 000000000..5c1544b15 --- /dev/null +++ b/Getting-Started/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/How-the-script-works/index.html b/How-the-script-works/index.html new file mode 100644 index 000000000..356387930 --- /dev/null +++ b/How-the-script-works/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Menu-System/index.html b/Menu-System/index.html new file mode 100644 index 000000000..3d2152e98 --- /dev/null +++ b/Menu-System/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Misc/index.html b/Misc/index.html new file mode 100644 index 000000000..356387930 --- /dev/null +++ b/Misc/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Native-RTL_433/index.html b/Native-RTL_433/index.html new file mode 100644 index 000000000..356387930 --- /dev/null +++ b/Native-RTL_433/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Networking/index.html b/Networking/index.html new file mode 100644 index 000000000..3a72f81a3 --- /dev/null +++ b/Networking/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/PostBuild-Script/index.html b/PostBuild-Script/index.html new file mode 100644 index 000000000..c68577ad6 --- /dev/null +++ b/PostBuild-Script/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/RPIEasy_native/index.html b/RPIEasy_native/index.html new file mode 100644 index 000000000..356387930 --- /dev/null +++ b/RPIEasy_native/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Understanding-Containers/index.html b/Understanding-Containers/index.html new file mode 100644 index 000000000..11fb9a896 --- /dev/null +++ b/Understanding-Containers/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Updates/Changelog/index.html b/Updates/Changelog/index.html new file mode 100644 index 000000000..a8753db58 --- /dev/null +++ b/Updates/Changelog/index.html @@ -0,0 +1,2280 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Changelog - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Changelog

+ +

Latest

+

(may include items not yet merged)

+ + +
    +
  • Fixes to bash aliases.
  • +
  • Timescaledb template fixed and public port now mapped to 5433.
  • +
+ + +

2022-06-12

+
    +
  • Dockerfile based Zigbee2MQTT deprecated, requiring migration.
  • +
  • New service: Duckdns, deprecates the + duck/duck.sh script.
  • +
  • New service: Influxdb 2, supported only on + fully 64bit systems.
  • +
  • Docker health checks added to Grafana and InfluxDB.
  • +
+

2022-04-26

+ +

2022-01-18

+ + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Updates/New-Menu-Release-Notes/index.html b/Updates/New-Menu-Release-Notes/index.html new file mode 100644 index 000000000..e9bb21965 --- /dev/null +++ b/Updates/New-Menu-Release-Notes/index.html @@ -0,0 +1,2275 @@ + + + + + + + + + + + + + + + + + + + + + + + + + New IOTstack Menu - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

New IOTstack Menu

+

Background

+

Originally this script was written in bash. After a while it became obvious that bash wasn't well suited to dealing with all the different types of configuration files, and the logic that goes with configuring everything. IOTstack needs to be accessible to all levels of programmers and tinkerers, not just those experienced with Linux and bash. For this reason it was rewritten in Python, since the language syntax is easier to understand and is more commonly used for scripting and programming than bash. Bash is still used in IOTstack where it makes sense, but the menu system itself uses Python. The code is intentionally written so that both beginners and experienced programmers can contribute to the project. We are always open to improvements if you have suggestions.

+

On-going improvements

+

There are many features still to be introduced into the new menu system, from meta tags on services for filtering, to optional nginx autoconfiguration and authentication. For this reason you may initially experience bugs (it is very hard to test every type of configuration!). The new menu system has been worked on and tested for 6 months and we think it's stable enough to merge into the master branch for mainstream usage. The code still needs some work to make it easier to add new services, to avoid copy-pasting the same code for each new service, and to make the menu system optional altogether (so builds can be automated with bash scripts).

+

Breaking changes

+

There are a few changes that you need to be aware of:

+
    +
  • Docker environment *.env files are no longer used by default. Everything needed is specified in the service.yml file; you can still optionally use them, though, either with Custom Overrides or with the PostBuild script. Specific config files for certain services still work as they did before.
  • +
  • Python 3, pip3, PyYAML and Blessed are all required to be installed.
  • +
  • Not backwards compatible with old menu system. You will be able to switch back to the old menu system for a period of time by changing to the old-menu branch. It will be unmaintained except for critical updates. It will eventually be removed - but not before everyone is ready to leave it.
  • +
+

Test that your backups are working before you switch. The old-menu branch will become available just before the new menu is merged into master, to ensure it has the latest commits applied.

+

Full change list

+
    +
  • Menu and everything that goes with it rewritten in Python and Blessed
  • +
  • Easy installation script
  • +
  • All services rewritten to be compatible with PyYAML
  • +
  • Optional port selection for services
  • +
  • Issue checking for services before building
  • +
  • Options for services now in menu (no more editing service.yml files)
  • +
  • Automatic password generation for each service
  • +
  • Pre and post scripts for customising services
  • +
  • Removed env files
  • +
  • Backup and restoring more streamlined
  • +
  • Documentation updated for all services
  • +
  • No longer needs to be installed in the home directory ~.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Updates/Updating-the-Project/index.html b/Updates/Updating-the-Project/index.html new file mode 100644 index 000000000..e0c38c74d --- /dev/null +++ b/Updates/Updating-the-Project/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/Updates/gcgarner-migration/index.html b/Updates/gcgarner-migration/index.html new file mode 100644 index 000000000..f11c3ade7 --- /dev/null +++ b/Updates/gcgarner-migration/index.html @@ -0,0 +1,2803 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Migrating from gcgarner to SensorsIot - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Migrating from gcgarner to SensorsIot

+

These instructions explain how to migrate from gcgarner/IOTstack to SensorsIot/IOTstack.

+

Migrating to SensorsIot/IOTstack was fairly easy when this repository was first forked from gcgarner/IOTstack. Unfortunately, what was a fairly simple switching procedure no longer works properly because conflicts have emerged.

+

The probability of conflicts developing increases as a function of time since the fork. Conflicts were and are pretty much inevitable so a more involved procedure is needed.

+

Migration Steps

+

Step 1 – Check your assumptions

+

Make sure that you are, actually, on gcgarner. Don't assume!

+
$ git remote -v
+origin  https://github.com/gcgarner/IOTstack.git (fetch)
+origin  https://github.com/gcgarner/IOTstack.git (push)
+
+

Do not proceed if you don't see those URLs!

+

Step 2 – Take IOTstack down

+

Take your stack down. This is not strictly necessary but we'll be moving the goalposts a bit so it's better to be on the safe side.

+
$ cd ~/IOTstack
+$ docker-compose down
+
+

Step 3 – Choose your migration method

+

There are two basic approaches to switching from gcgarner/IOTstack to SensorsIot/IOTstack:

+ +

You can think of the first as "working with git" while the second is "using brute force".

+

The first approach will work if you haven't tried any other migration steps and/or have not made too many changes to items in your gcgarner/IOTstack that are under git control.

+

If you are already stuck or you try the first approach and get a mess, or it all looks far too hard to sort out, then try the Migration by clone and merge approach.

+

Migration Option 1 – change upstream repository

+
Check for local changes
+

Make sure you are on the master branch (you probably are so this is just a precaution), and then see if Git thinks you have made any local changes:

+
$ cd ~/IOTstack
+$ git checkout master
+$ git status
+
+

If Git reports any "modified" files, those will probably get in the way of a successful migration so it's a good idea to get those out of the way.

+

For example, suppose you edited menu.sh at some point. Git would report that as:

+
    modified:   menu.sh
+
+

The simplest way to deal with modified files is to rename them to move them out of the way, and then restore the original:

+
    +
  1. +

    Rename your customised version by adding your initials to the end of the filename. Later, you can come back and compare your customised version with the version from GitHub and see if you want to preserve any changes.

    +

    Here I'm assuming your initials are "jqh":

    +
    $ mv menu.sh menu.sh.jqh
    +
    +
  2. +
  3. +

    Tell git to restore the unmodified version:

    +
    $ git checkout -- menu.sh
    +
    +
  4. +
  5. +

    Now, repeat the Git command that complained about the file:

    +
    $ git status
    +
    +

    The modified file will show up as "untracked" which is OK (ignore it)

    +
    Untracked files:
    +  (use "git add <file>..." to include in what will be committed)
    +
    +    menu.sh.jqh
    +
    +
  6. +
+
Synchronise with gcgarner on GitHub
+

Make sure your local copy of gcgarner is in sync with GitHub.

+
$ git pull
+
+
Get rid of any upstream reference
+

There may or may not be any "upstream" set. The most likely reason for this to happen is if you used your local copy as the basis of a Pull Request.

+

The next command will probably return an error, which you should ignore. It's just a precaution.

+
$ git remote remove upstream
+
+
Point to SensorsIot
+

Change your local repository to point to SensorsIot.

+
$ git remote set-url origin https://github.com/SensorsIot/IOTstack.git
+
+
Synchronise with SensorsIot on GitHub
+

This is where things can get a bit tricky so please read these instructions carefully before you proceed.

+

When you run the next command, it will probably give you a small fright by opening a text-editor window. Don't panic - just keep reading. Now, run this command:

+
$ git pull -X theirs origin master
+
+

The text editor window will look something like this:

+
Merge branch 'master' of https://github.com/SensorsIot/IOTstack
+
+# Please enter a commit message to explain why this merge is necessary,
+# especially if it merges an updated upstream into a topic branch.
+#
+# Lines starting with '#' will be ignored, and an empty message aborts
+# the commit.
+
+

The first line is a pre-prepared commit message, the remainder is boilerplate instructions which you can ignore.

+

Exactly which text editor opens is a function of your EDITOR environment variable and the core.editor set in your global Git configuration. If you:

+
    +
  • +

    remember changing EDITOR and/or core.editor then, presumably, you will know how to interact with your chosen text editor. You don't need to make any changes to this file. All you need to do is save the file and exit;

    +
  • +
  • +

    don't remember changing either EDITOR or core.editor then the editor will probably be the default vi (aka vim). You need to type ":wq" (without the quotes) and then press return. The ":" puts vi into command mode, the "w" says "save the file" and "q" means "quit vi". Pressing return runs the commands.

    +
  • +
+

Git will display a long list of stuff. It's very tempting to ignore it but it's a good idea to take a closer look, particularly for signs of error or any lines beginning with:

+
Auto-merging
+
+

At the time of writing, you can expect Git to mention these two files:

+
Auto-merging menu.sh
+Auto-merging .templates/zigbee2mqtt/service.yml
+
+

Those are known issues and the merge strategy -X theirs on the git pull command you have just executed deals with both, correctly, by preferring the SensorsIot version.

+

Similar conflicts may emerge in future and those will probably be dealt with, correctly, by the same merge strategy. Nevertheless, you should still check the output very carefully for other signs of merge conflict so that you can at least be alive to the possibility that the affected files may warrant closer inspection.

+

For example, suppose you saw:

+
Auto-merging .templates/someRandomService/service.yml
+
+

If you don't use someRandomService then you could safely ignore this on the basis that it was "probably right". However, if you did use that service and it started to misbehave after migration, you would know that the service.yml file was a good place to start looking for explanations.

+
Finish with a pull
+

At this point, only the migrated master branch is present on your local copy of the repository. The next command brings you fully in-sync with GitHub:

+
$ git pull
+
+

Migration Option 2 – clone and merge

+

If you have been following the process correctly, your IOTstack will already be down.

+
Rename your existing IOTstack folder
+

Move your old IOTstack folder out of the way, like this:

+
$ cd ~
+$ mv IOTstack IOTstack.old
+
+

Note:

+
    +
  • You should not need sudo for the mv command but it is OK to use it if necessary.
  • +
+
Fetch a clean clone of SensorsIot/IOTstack
+
$ git clone https://github.com/SensorsIot/IOTstack.git ~/IOTstack
+
+

Explore the result:

+
$ tree -aFL 1 --noreport ~/IOTstack
+/home/pi/IOTstack
+├── .bash_aliases
+├── .git/
+├── .github/
+├── .gitignore
+├── .native/
+├── .templates/
+├── .tmp/
+├── LICENSE
+├── README.md
+├── docs/
+├── duck/
+├── install.sh*
+├── menu.sh*
+├── mkdocs.yml
+└── scripts/
+
+

Note:

+
    +
  • If the tree command is not installed for some reason, use ls -A1F ~/IOTstack.
  • +
+

Observe what is not there:

+
    +
  • There is no docker-compose.yml
  • +
  • There is no backups directory
  • +
  • There is no services directory
  • +
  • There is no volumes directory
  • +
+

From this, it should be self-evident that a clean checkout from GitHub is the factory for all IOTstack installations, while the contents of backups, services, volumes and docker-compose.yml represent each user's individual choices, configuration options and data.

+
Merge old into new
+

Execute the following commands:

+
$ mv ~/IOTstack.old/docker-compose.yml ~/IOTstack
+$ mv ~/IOTstack.old/services ~/IOTstack
+$ sudo mv ~/IOTstack.old/volumes ~/IOTstack 
+
+

You should not need to use sudo for the first two commands. However, if you get a permissions conflict on either, you should proceed like this:

+
    +
  • +

    docker-compose.yml

    +
    $ sudo mv ~/IOTstack.old/docker-compose.yml ~/IOTstack
    +$ sudo chown pi:pi ~/IOTstack/docker-compose.yml
    +
    +
  • +
  • +

    services

    +
    $ sudo mv ~/IOTstack.old/services ~/IOTstack
    +$ sudo chown -R pi:pi ~/IOTstack/services
    +
    +
  • +
+

There is no need to migrate the backups directory. You are better off creating it by hand:

+
$ mkdir ~/IOTstack/backups
+
+

Step 4 – Choose your menu

+

If you have reached this point, you have migrated to SensorsIot/IOTstack where you are on the "master" branch. This implies "new menu".

+

The choice of menu is entirely up to you. Differences include:

+
    +
  1. New menu takes a lot more screen real-estate than old menu. If you do a fair bit of work on small screens (eg iPad) you might find it hard to work with new menu.
  2. +
  3. New menu creates a large number of internal Docker networks whereas old menu has one internal network to rule them all. The practical consequence is that most users see error messages for networks being defined but not used, and occasionally run into problems where two containers can't talk to each other without tinkering with the networks. Neither of those happen under old menu. See Issue 245 if you want more information on this.
  4. +
  5. New menu has moved the definition of environment variables into docker-compose.yml. Old menu keeps environment variables in "environment files" in ~/IOTstack/services. There is no "right" or "better" about either approach. It's just something to be aware of.
  6. +
  7. Under new menu, the service.yml files in ~/IOTstack/.templates have all been left-shifted by two spaces. That means you can no longer use copy and paste to test containers - you're stuck with the extra work of re-adding the spaces. Again, this doesn't matter but you do need to be aware of it.
  8. +
+

What you give up when you choose old menu is summarised in the following. If a container appears on the right hand side but not the left then it is only available in new menu.

+
old-menu                master (new menu)
+├── adminer             ├── adminer
+├── blynk_server        ├── blynk_server
+├── dashmachine         ├── dashmachine
+├── deconz              ├── deconz
+├── diyhue              ├── diyhue
+├── domoticz            ├── domoticz
+├── dozzle              ├── dozzle
+├── espruinohub         ├── espruinohub
+                      > ├── example_template
+├── gitea               ├── gitea
+├── grafana             ├── grafana
+├── heimdall            ├── heimdall
+                      > ├── home_assistant
+├── homebridge          ├── homebridge
+├── homer               ├── homer
+├── influxdb            ├── influxdb
+├── mariadb             ├── mariadb
+├── mosquitto           ├── mosquitto
+├── motioneye           ├── motioneye
+├── nextcloud           ├── nextcloud
+├── nodered             ├── nodered
+├── openhab             ├── openhab
+├── pihole              ├── pihole
+├── plex                ├── plex
+├── portainer           ├── portainer
+├── portainer_agent     ├── portainer_agent
+├── portainer-ce        ├── portainer-ce
+├── postgres            ├── postgres
+├── prometheus          ├── prometheus
+├── python              ├── python
+├── qbittorrent         ├── qbittorrent
+├── rtl_433             ├── rtl_433
+├── tasmoadmin          ├── tasmoadmin
+├── telegraf            ├── telegraf
+├── timescaledb         ├── timescaledb
+├── transmission        ├── transmission
+├── webthings_gateway   ├── webthings_gateway
+├── wireguard           ├── wireguard
+└── zigbee2mqtt         ├── zigbee2mqtt
+                      > └── zigbee2mqtt_assistant
+
+

You also give up the compose-override.yml functionality. On the other hand, Docker has its own docker-compose.override.yml which works with both menus.

+

If you want to switch to the old menu:

+
$ git checkout old-menu
+
+

Any time you want to switch back to the new menu:

+
$ git checkout master
+
+

You can switch back and forth as much as you like and as often as you like. It's no harm, no foul. The branch you are on just governs what you see when you run:

+
$ ./menu.sh
+
+

Although you can freely change branches, it's probably not a good idea to try to mix-and-match your menus. Pick one menu and stick to it.

+

Even so, nothing will change until you run your chosen menu to completion and allow it to generate a new docker-compose.yml.

+

Step 5 – Bring up your stack

+

Unless you have gotten ahead of yourself and have already run the menu (old or new) then nothing will have changed in the parts of your ~/IOTstack folder that define your IOTstack implementation. You can safely:

+
$ docker-compose up -d
+
+

See also

+

There is another gist Installing Docker for IOTstack which explains how to overcome problems with outdated Docker and Docker-Compose installations.

+

Depending on the age of your gcgarner installation, you may run into problems which will be cured by working through that gist.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Updates/index.html b/Updates/index.html new file mode 100644 index 000000000..6513b43ac --- /dev/null +++ b/Updates/index.html @@ -0,0 +1,2486 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Updating the project - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Updating the project

+

There are two different update sources: the IOTstack project (github.com) and Docker image registries (e.g. hub.docker.com). Both the initial stack creation and later updates draw on both of these. Initial creation is a bit simpler, as the intermediate steps are done automatically; for a full update they need to be performed explicitly. To illustrate the steps and artifacts of the update process:

+
flowchart TD
+  GIT[github.com/sensorsiot/IOTstack.git]
+  GIT       --- GITPULL([$ git pull -r])
+  GITPULL   --> TEMPLATES["~/IOTstack/.templates"]
+  TEMPLATES --- MENU([$ ./menu.sh -> Build stack])
+  MENU      --> COMPOSE["~/IOTstack/docker-compose.yml
+                         ~/IOTstack/.templates/*/Dockerfile
+                         ~/IOTstack/services/*/Dockerfile"]
+  COMPOSE   --- UP(["$ docker-compose up --build -d"])
+
+  HUB[hub.docker.com images and tags]
+  HUB       --- PULL([$ docker-compose pull\n$ docker-compose build --pull --no-cache])
+  COMPOSE   --- PULL
+  PULL      --> CACHE[local Docker image cache]
+  CACHE     --- UP
+
+  UP        --> CONTAINER[recreated Docker containers based on the latest cached images]
+
+  classDef command fill:#9996,stroke-width:0px
+  class GITPULL,MENU,UP,PULL command
+
+Minor details fudged in the graph +

In order to keep the graph simple, some minor details were left imprecise:

+
    +
  • $ docker-compose pull will read docker-compose.yml, in order to know + what image tags to check for updates.
  • +
  • +

    $ docker-compose build --pull --no-cache will use docker-compose.yml + to find which of the "build:" sources are in use:

    +
      +
    • ~/IOTstack/.templates/*/Dockerfile
    • +
    • ~/IOTstack/services/*/Dockerfile
    • +
    • remote repositories with Dockerfiles
    • +
    +

    and pull Docker images referenced in these while building.

    +
  • +
  • +

    $ docker-compose up --build -d may not require the "--build"-flag, + but having it won't hurt (and may help keep some corner-case problems + away, docker may be a bit finicky).

    +
  • +
+
+

Backup and rollback

+

The usual way of backing up just your ~/IOTstack contents isn't sufficient +for a 100% identical restore. Some containers may have local ephemeral +modifications that will be lost when they're recreated. Currently running +containers may be based on now outdated images. Recreating a container using an +old image is tricky. The local Docker image cache can't easily be restored to +the same state with old images and old tag references. The docker pull will +fetch the latest images, but it's not unheard of that the latest image may +break something.

+

Thus to guarantee a successful rollback to the pre-update state, you have to +shutdown your RPi and save a complete disk image backup of its storage using +another machine.

+

For a hobby project, not having a perfect rollback may be a risk you're willing +to take. Usually image problems will have fixes/workarounds within a day.

+

Update Raspberry Pi OS

+

You should keep your Raspberry Pi up-to-date. Despite the word "container" +suggesting that containers are fully self-contained, they sometimes depend on +operating system components (WireGuard is an example).

+
$ sudo apt update
+$ sudo apt upgrade -y
+
+ +

When you built the stack using the menu, it created the Docker Compose file docker-compose.yml. This file, and any build instructions (Dockerfiles) it uses, refer to images on hub.docker.com or other registries by name and tag. An undefined tag defaults to :latest. When Docker is told to pull updated images, it downloads them into the local cache, based on what is currently stored at the registry for the names and tags in use.

+

Updating the IOTstack project templates and recreating your docker-compose.yml isn't usually necessary. Doing so isn't likely to provide much benefit, and may actually break something. A full update is only recommended when there is a new feature or change you need.

+
+

Recommended update procedure

+
    +
  1. Shutdown your RPi, remove the storage medium and do a full backup image of the storage to another machine. Reattach the storage and power up your RPi.
    NOTE: Skipping this step may cost you days of downtime while you debug a problem or wait for fixes.
  2. +
  3. Get latest images from the web: +
    $ docker-compose pull
    +
  4. +
  5. Rebuild locally created images based on new parent images:
    $ docker-compose build --pull --no-cache
    +
    + Note: this may not do anything, depending on your selected services.
  6. +
  7. Update(recreate) containers that have new images: +
    $ docker-compose up --build -d
    +
  8. +
+
+

If a service fails to start after it's updated, especially if you are updating +frequently, wait for a few hours and repeat the update procedure. Sometimes bad +releases are published to hub.docker.com, but they are usually fixed in under +half a day. Of course you are always welcome to report the problem to our +Discord server. Usually someone else has +encountered the same problem and reported the fix.

+

Full update

+

Periodically, updates are made to the project which include new or updated container templates, changes to backups, or additional features. To evaluate whether a full update is really needed, see the changelog or the merged pull requests. To apply all these changes, all service definitions are recreated. As a drawback, this will wipe any custom changes to docker-compose.yml, may change semantics, or may even require manual migration steps.

+
+

Breaking update

+

A change done 2022-01-18 will require manual steps +or you may get an error like:
+ERROR: Service "influxdb" uses an undefined network "iotstack_nw"

+
+

Full update steps:

+
    +
  1. Shutdown your RPi, remove the storage medium and do a full backup image of the storage to another machine. Reattach the storage and power up your RPi.
    NOTE: Skipping this step may cost you days of downtime while you debug a problem or wait for fixes.
  2. +
  3. +

    Check git status --untracked-files=no for any local changes you may have made to project files. For any listed changes, either:

    +
      +
    1. Save and preserve your change by doing a local commit: git commit -m "local customization" -- path/to/changed_file, or
    2. +
    3. Revert it using: git checkout -- path/to/changed_file
    4. +
    +
  4. +
  5. +

    Update project files from github: git pull -r origin master

    +
  6. +
  7. Save your current compose file: cp docker-compose.yml docker-compose.yml.bak. NOTE: this is really useful, as the next step will overwrite all your previous manual changes to docker-compose.yml.
  8. +
  9. Recreate the compose file and Dockerfiles: ./menu.sh, select Build Stack, for each of your selected services de- and re-select it, press enter to build, and then exit.
  10. +
  11. Check the changes for obvious errors (e.g. passwords): diff docker-compose.yml docker-compose.yml.bak
  12. +
  13. Perform the Docker image update procedure: +
    $ docker-compose pull
    +$ docker-compose build --pull --no-cache 
    +$ docker-compose up --build -d 
    +
  14. +
+

Troubleshooting: if a container fails to start after update

+
    +
  • try restarting the whole stack: docker-compose restart
  • +
  • Check log output of the failing service: docker-compose logs *service-name*
      +
    • try googling and fixing problems in docker-compose.yml manually.
    • +
    +
  • +
  • check how the container definitions have changed: diff docker-compose.yml docker-compose.yml.bak
  • +
  • try rebuilding your complete stack from scratch:
      +
    1. check that you have a backup.
    2. +
    3. stop and remove Docker containers: docker-compose down
    4. +
    5. remove all menu generated files: rm -r docker-compose.yml services
    6. +
    7. recreate the stack: ./menu.sh, select Build Stack, select all your + services, press enter to build, and then exit.
    8. +
    9. try starting: docker-compose up -d
    10. +
    +
  • +
  • Go to the IOTstack Discord and describe your + problem. We're happy to help.
  • +
+

Old-menu

+
+

Warning

+

If you ran git checkout -- 'git ls-files -m' as suggested in the old wiki entry then please check your duck.sh because it removed your domain and token

+
+

Git offers built-in functionality to fetch the latest changes.

+

git pull origin master will fetch the latest changes from GitHub without overwriting files that you have modified yourself. If you have done a local commit then you may need to handle a merge conflict.

+

This can be verified by running git status. You can ignore it if duck.sh is reported as modified.

+

image

+

Should you have any modified scripts or templates they can be reset to the latest version with git checkout -- scripts/ .templates/

+

With the latest version of the project you can now use the menu to build your stack. If there is a particular container whose template you would like to update, you can select that at the overwrite option for your container. You have the choice not to overwrite, to preserve env files, or to completely overwrite any changes (passwords).

+

image

+

After your stack has been rebuilt you can run docker-compose up -d to pull in the latest changes. If you have not updated your images in a while, consider running ./scripts/update.sh to get the latest version of the images from Docker Hub as well.

+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/Updates/migration-network-change/index.html b/Updates/migration-network-change/index.html new file mode 100644 index 000000000..014e37693 --- /dev/null +++ b/Updates/migration-network-change/index.html @@ -0,0 +1,2255 @@ + + + + + + + + + + + + + + + + + + + + + + + Migration: network change - IOTstack + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Migration: network change

+

Networking under both new menu (master branch) and old menu (old-menu branch) has undergone a significant change. This will not affect new users of IOTstack (who will adopt it automatically). Neither will it affect existing users who do not use the menu to maintain their stacks (see adopting networking changes by hand below).

+

Users who do use the menu to maintain their stacks will also be unaffected until the next menu run, at which point it will be prudent to down your stack entirely and re-select all your containers. Downing the stack causes Docker to remove all associated networks as well as the containers.

+

These changes mean that networking is identical under both old and new menus. To summarise the changes:

+
    +
  1. +

    Only two internal networks are defined – as follows:

    +
      +
    • "default" which adopts the name iotstack_default at runtime.
    • +
    • "nextcloud" which adopts the name iotstack_nextcloud at runtime.
    • +
    +

    If you are using docker-compose v2.0.0 or later then the iotstack_nextcloud network will only be instantiated if you select NextCloud as one of your services. Earlier versions of docker-compose instantiate all networks even if no service uses them (which is why you get those warnings at "up" time).

    +
  2. +
  3. +

    The only service definitions which now have networks: directives are:

    +
      +
    • NextCloud: joins the "default" and "nextcloud" networks; and
    • +
    • NextCloud_DB: joins the "nextcloud" network.
    • +
    +

    All other containers will join the "default" network, automatically, without needing any networks: directives.

    +
  4. +
+

adopting networking changes by hand

+

If you maintain your docker-compose.yml by hand, you can adopt the networking changes by doing the following:

+
    +
  1. Take your stack down. This causes Docker to remove any existing networks.
  2. +
  3. +

    Remove all networks: directives wherever they appear in your docker-compose.yml. That includes:

    +
      +
    • the networks: directives in all service definitions; and
    • +
    • the networks: specifications at the end of the file.
    • +
    +
  4. +
  5. +

    Append the contents of the following file to your docker-compose.yml:

    +
    ~/IOTstack/.templates/docker-compose-base.yml
    +
    +

    For example:

    +
    $ cat ~/IOTstack/.templates/docker-compose-base.yml >>~/IOTstack/docker-compose.yml
    +
    +

    The docker-compose-base.yml file is named env.yml in the old-menu branch.

    +
  6. +
  7. +

    If you run the NextCloud service then:

    +
      +
    • +

      Add these lines to the NextCloud service definition:

      +
      networks:
      +  - default
      +  - nextcloud
      +
      +
    • +
    • +

      Add these lines to the NextCloud_DB service definition:

      +
      networks:
      +  - nextcloud
      +
      +
    • +
    +
  8. +
  9. +

    Bring up your stack.

    +
  10. +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 000000000..1cf13b9f9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.dd8806f2.min.js b/assets/javascripts/bundle.dd8806f2.min.js new file mode 100644 index 000000000..e22d189fd --- /dev/null +++ b/assets/javascripts/bundle.dd8806f2.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return 
J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,we(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:x(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return x(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function E(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=E("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),we(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(y(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(y(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=E("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=E("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function Te(){return St}function G(e){return St.features.includes(e)}function ye(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(y(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(y(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?E("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},E("div",{class:"md-tooltip__inner md-typeset"})):E("div",{class:"md-tooltip",id:e,role:"tooltip"},E("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return E("div",{class:"md-tooltip2",role:"tooltip"},E("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
E("aside",{class:"md-annotation",tabIndex:0},Pt(t),E("a",{href:r,class:"md-annotation__index",tabIndex:-1},E("span",{"data-md-annotation-id":e})))}else return E("aside",{class:"md-annotation",tabIndex:0},Pt(t),E("span",{class:"md-annotation__index",tabIndex:-1},E("span",{"data-md-annotation-id":e})))}function wn(e){return E("button",{class:"md-clipboard md-icon",title:ye("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,E("del",null,c)," "],[]).slice(0,-1),i=Te(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=Te();return E("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},E("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&E("div",{class:"md-search-result__icon md-icon"}),r>0&&E("h1",null,e.title),r<=0&&E("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return E("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&E("p",{class:"md-search-result__terms"},ye("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=Te(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[E("details",{class:"md-search-result__more"},E("summary",{tabIndex:-1},E("div",null,p.length>0&&p.length===1?ye("search.result.more.one"):ye("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return E("li",{class:"md-search-result__item"},c)}function Sn(e){return E("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>E("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return E("div",{class:t,hidden:!0},E("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return E("div",{class:"md-typeset__scrollwrap"},E("div",{class:"md-typeset__table"},e))}function Ca(e){let t=Te(),r=new URL(`../${e.version}/`,t.base);return E("li",{class:"md-version__item"},E("a",{href:`${r}`,class:"md-version__link"},e.title))}function Mn(e,t){return e=e.filter(r=>{var o;return!((o=r.properties)!=null&&o.hidden)}),E("div",{class:"md-version"},E("button",{class:"md-version__current","aria-label":ye("select.version")},t.title),E("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:M),y(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return 
z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(y(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),we(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(y(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(y(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(b(n=>n),we(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),y(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(y(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup 
text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10.7.0/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(y(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=E("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=E("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(E("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),y(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),tt(e).pipe(v(()=>Na(n)),y(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(y(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(y(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let 
o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(y(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=E("meta",{name:"theme-color"});document.head.appendChild(r);let o=E("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),y(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(y(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(y(t=>{t.trigger.focus()}),m(()=>ye("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=Te();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),e.pipe(Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),y(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function 
ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=Te(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),v(i=>{let{version:a}=n.get(i);return ur(new URL(i)).pipe(m(s=>{let c=xe().href.replace(t.base,"");return s.has(c.split("#")[0])?new URL(`../${a}/${c}`,t.base):new URL(i)}))})))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(y(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?ye("search.result.none"):ye("search.result.placeholder");break;case 1:a.textContent=ye("search.result.one");break;default:let u=sr(l.length);a.textContent=ye("search.result.other",u)}});let p=o.pipe(y(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(y(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(y(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(y(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=Te();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=E("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(y(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(y(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(y(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(y(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let 
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(y(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(y(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),we(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),y(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),y(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new 
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=Te(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.dd8806f2.min.js.map + diff --git a/assets/javascripts/bundle.dd8806f2.min.js.map b/assets/javascripts/bundle.dd8806f2.min.js.map new file mode 100644 index 000000000..17bf02572 --- /dev/null +++ b/assets/javascripts/bundle.dd8806f2.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 