Skip to content

Commit

Permalink
Merge pull request #385 from SelfhostedPro/develop
Browse files Browse the repository at this point in the history
# Yacht Alpha v0.0.7 Released!

## Notable Changes:
* [Shipwright](https://shipwright.yacht.sh) a new template builder is released (pre-alpha)
* Yacht is now a PWA, and if published with SSL, you'll be able to install it on most devices for easy access.
* API Key framework so now applications can interact with Yacht directly (found in user settings when auth is enabled).
* Changed logs and stats to Server Sent Events so websocket support is no longer needed 
  *\*This may change in the future, as I believe websocket support will be needed for container CLI access.*
* Redesigned the look of all the main pages
* Support for command, memory limits, and CPU limits in templates and deployments.

## Bugfixes:
* Better error handling for Projects.
* Issue where ports were defined more than once
* Data in dashboards being off
* Various UI glitches
* Other various fixes (see the merge request for a full list)
  • Loading branch information
SelfhostedPro authored Apr 23, 2021
2 parents 7e390a8 + aad1783 commit d0253af
Show file tree
Hide file tree
Showing 123 changed files with 4,103 additions and 1,142 deletions.
8 changes: 3 additions & 5 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -51,15 +51,13 @@ jobs:
platforms: linux/amd64,linux/arm64,linux/arm
push: true
build-args: |
VUE_APP_VERSION=v0.0.6-alpha-${{ steps.current-time.outputs.formattedTime }}
VUE_APP_VERSION=v0.0.7-alpha-${{ steps.current-time.outputs.formattedTime }}
tags: |
selfhostedpro/yacht
selfhostedpro/yacht:latest-${{ steps.current-time.outputs.formattedTime }}
selfhostedpro/yacht:v0.0.7-alpha-${{ steps.current-time.outputs.formattedTime }}
ghcr.io/selfhostedpro/yacht
ghcr.io/selfhostedpro/yacht:latest-${{ steps.current-time.outputs.formattedTime }}
selfhostedpro/yacht:v0.0.6-alpha-${{ steps.current-time.outputs.formattedTime }}
ghcr.io/selfhostedpro/yacht
ghcr.io/selfhostedpro/yacht:latest-${{ steps.current-time.outputs.formattedTime }}
ghcr.io/selfhostedpro/yacht:v0.0.6-alpha-${{ steps.current-time.outputs.formattedTime }}
ghcr.io/selfhostedpro/yacht:v0.0.7-alpha-${{ steps.current-time.outputs.formattedTime }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
.github
.vscode
versions
sql_app.db
venv
*.db
Expand Down
6 changes: 4 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -27,14 +27,16 @@ RUN \
apk add --no-cache --virtual=build-dependencies \
g++ \
make \
postgresql-dev \
python3-dev \
libffi-dev \
ruby-dev &&\
echo "**** install packages ****" && \
apk add --no-cache \
python3 \
py3-pip \
postgresql-dev \
mysql-dev \
postgresql-dev \
mysql-dev \
nginx &&\
gem install sass &&\
Expand All @@ -58,4 +60,4 @@ COPY nginx.conf /etc/nginx/

# Expose
VOLUME /config
EXPOSE 8000
EXPOSE 8000
12 changes: 5 additions & 7 deletions backend/alembic/env.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
from os.path import abspath, dirname
import sys
import os
from logging.config import fileConfig

from sqlalchemy import engine_from_config, MetaData
Expand All @@ -16,18 +19,11 @@
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
import os
import sys
from os.path import abspath, dirname

sys.path.insert(0, dirname(dirname(abspath(__file__))))


from api.db import models
from api.settings import Settings

print("--- MODELS ---")
print(models)
# Combine metadata from auth and containers/templates
combined_meta_data = MetaData()
for declarative_base in [models.Base]:
Expand Down Expand Up @@ -66,6 +62,7 @@ def run_migrations_offline():
)

with context.begin_transaction():
context.execute("DROP TABLE IF EXISTS alembic_version;")
context.run_migrations()


Expand All @@ -86,6 +83,7 @@ def run_migrations_online():
context.configure(connection=connection, target_metadata=target_metadata)

with context.begin_transaction():
context.execute("DROP TABLE IF EXISTS alembic_version;")
context.run_migrations()


Expand Down
125 changes: 123 additions & 2 deletions backend/api/actions/apps.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from fastapi import HTTPException
from fastapi.responses import StreamingResponse

from api.db.schemas.apps import DeployLogs, DeployForm, AppLogs, Processes
from api.utils.apps import (
Expand All @@ -12,13 +13,24 @@
conv_restart2data,
conv_sysctls2data,
conv_volumes2data,
conv_cpus2data,
_check_updates,
calculate_cpu_percent,
calculate_cpu_percent2,
format_bytes,
)
from api.utils.templates import conv2dict

import yaml
import json
import io
import zipfile
import time
import subprocess
import docker
import aiodocker
import asyncio


"""
Returns all running apps in a list
Expand Down Expand Up @@ -160,6 +172,7 @@ def deploy_app(template: DeployForm):
template.name,
conv_image2data(template.image),
conv_restart2data(template.restart_policy),
template.command,
conv_ports2data(template.ports, template.network, template.network_mode),
conv_portlabels2data(template.ports),
template.network_mode,
Expand All @@ -170,6 +183,8 @@ def deploy_app(template: DeployForm):
conv_labels2data(template.labels),
conv_sysctls2data(template.sysctls),
conv_caps2data(template.cap_add),
conv_cpus2data(template.cpus),
template.mem_limit,
edit=template.edit or False,
_id=template.id or None,
)
Expand Down Expand Up @@ -214,6 +229,7 @@ def launch_app(
name,
image,
restart_policy,
command,
ports,
portlabels,
network_mode,
Expand All @@ -224,6 +240,8 @@ def launch_app(
labels,
sysctls,
caps,
cpus,
mem_limit,
edit,
_id,
):
Expand All @@ -246,6 +264,7 @@ def launch_app(
name=name,
image=image,
restart_policy=restart_policy,
command=command,
ports=ports,
network=network,
network_mode=network_mode,
Expand All @@ -255,13 +274,17 @@ def launch_app(
labels=combined_labels,
devices=devices,
cap_add=caps,
nano_cpus=cpus,
mem_limit=mem_limit,
detach=True,
)
except Exception as e:
except docker.errors.APIError as e:
if e.status_code == 500:
failed_app = dclient.containers.get(name)
failed_app.remove()
raise e
raise HTTPException(
status_code=e.status_code, detail=e.explanation.decode("utf-8")
)

print(
f"""Container started successfully.
Expand Down Expand Up @@ -424,3 +447,101 @@ def check_self_update():
raise HTTPException(status_code=400, detail=exc.args)

return _check_updates(yacht.image.tags[0])


def generate_support_bundle(app_name):
    """Bundle a container's logs and config into a zip and stream it back.

    Args:
        app_name: Name of the docker container to collect data for.

    Returns:
        StreamingResponse: a zip attachment containing ``<app>.log`` (raw
        container logs) and ``<app>-config.yml`` (the container's inspect
        attrs rendered as YAML).

    Raises:
        HTTPException: 404 if no container with that name exists.
    """
    dclient = docker.from_env()
    # containers.get() raises NotFound rather than returning a falsy value,
    # so a truthiness check can never reach a "not found" branch — catch the
    # exception explicitly to produce the intended 404.
    try:
        app = dclient.containers.get(app_name)
    except docker.errors.NotFound:
        raise HTTPException(404, f"App {app_name} not found.")

    stream = io.BytesIO()
    # The ZipFile must be closed (end of the `with`) before the stream is
    # rewound and handed to StreamingResponse, otherwise the zip's central
    # directory has not been written yet and the archive is corrupt.
    with zipfile.ZipFile(stream, "w") as zf:
        attrs = app.attrs
        service_log = app.logs()
        zf.writestr(f"{app_name}.log", service_log)
        zf.writestr(f"{app_name}-config.yml", yaml.dump(attrs))
    # It is possible that ".write(...)" has better memory management here.
    stream.seek(0)
    return StreamingResponse(
        stream,
        media_type="application/x-zip-compressed",
        headers={
            "Content-Disposition": f"attachment;filename={app_name}_bundle.zip"
        },
    )


async def log_generator(request, app_name):
    """Yield server-sent-event dicts for a container's log lines.

    Follows the container's stdout/stderr (starting from the last 200 lines)
    and emits one SSE ``update`` event per log line until the client
    disconnects.

    Args:
        request: The incoming (Starlette-style) request, used to detect
            client disconnects.
        app_name: Name of the container whose logs are streamed.
    """
    while True:
        # NOTE: renamed from `docker` to avoid shadowing the module-level
        # `docker` import.
        async with aiodocker.Docker() as docker_client:
            container = await docker_client.containers.get(app_name)
            if container._container["State"]["Status"] == "running":
                logs_generator = container.log(
                    stdout=True, stderr=True, follow=True, tail=200
                )
                async for line in logs_generator:
                    yield {"event": "update", "retry": 3000, "data": line}
            else:
                # Without this the loop busy-waits at 100% CPU while the
                # container is stopped.
                await asyncio.sleep(1)

            if await request.is_disconnected():
                break


async def stat_generator(request, app_name):
    """Yield server-sent-event dicts with a running container's stats.

    Streams docker stats frames for `app_name`, converts each frame via
    process_app_stats, and emits an SSE "update" event only when the
    processed stats differ from the previously sent ones (dedup).
    Stops when the client disconnects.
    """
    # Last stats dict sent to the client; used to suppress duplicate events.
    prev_stats = None
    while True:
        async with aiodocker.Docker() as adocker:
            # NOTE(review): the `DockerContainer` annotation is not imported
            # in this module — it is unevaluated (local annotation) so it
            # does not fail at runtime, but linters will flag it; confirm
            # the intended type is aiodocker.docker.DockerContainer.
            container: DockerContainer = await adocker.containers.get(app_name)
            if container._container["State"]["Status"] == "running":
                stats_generator = container.stats(stream=True)

                async for line in stats_generator:
                    current_stats = await process_app_stats(line, app_name)
                    # Only push an event when something actually changed.
                    if prev_stats != current_stats:
                        yield {
                            "event": "update",
                            "retry": 30000,
                            "data": json.dumps(current_stats),
                        }
                        prev_stats = current_stats

            if await request.is_disconnected():
                break

            # Stats are generated every second by docker
            # so there's no point in checking more often than that
            await asyncio.sleep(1)


async def process_app_stats(line, app_name):
    """Convert one raw docker stats frame into the dict sent to the UI.

    Args:
        line: A single decoded stats frame from docker's stats stream
            (expects at least ``memory_stats``, ``read`` and CPU keys).
        app_name: Container name, echoed into the result.

    Returns:
        dict: time, name, mem_total, mem_current, mem_percent (rounded,
        or None when memory stats are unavailable) and cpu_percent.
    """
    cpu_total = 0.0
    cpu_system = 0.0
    cpu_percent = 0.0
    # memory_stats can be an empty dict (e.g. first frame / stopped
    # container) — in that case all memory fields are reported as None.
    if line["memory_stats"]:
        mem_current = line["memory_stats"]["usage"]
        mem_total = line["memory_stats"]["limit"]
        mem_percent = (mem_current / mem_total) * 100.0
    else:
        mem_current = None
        mem_total = None
        mem_percent = None

    try:
        cpu_percent, cpu_system, cpu_total = await calculate_cpu_percent2(
            line, cpu_total, cpu_system
        )
    except KeyError as exc:
        # Bug fix: the original printed a bare "%r" placeholder with no
        # argument, so the actual KeyError was never shown.
        print("error while getting new CPU stats: %r, falling back" % (exc,))
        cpu_percent = await calculate_cpu_percent(line)

    full_stats = {
        "time": line["read"],
        "name": app_name,
        "mem_total": mem_total,
        "cpu_percent": round(cpu_percent, 1),
        "mem_current": mem_current,
        # Bug fix: round(None, 1) raised TypeError whenever memory_stats
        # was empty — guard against the None case.
        "mem_percent": round(mem_percent, 1) if mem_percent is not None else None,
    }
    return full_stats
15 changes: 11 additions & 4 deletions backend/api/actions/compose.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,8 +194,9 @@ def get_compose_projects():
if loaded_compose.get("networks"):
for network in loaded_compose.get("networks"):
networks.append(network)
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
if loaded_compose.get("services"):
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
_project = {
"name": project,
"path": file,
Expand Down Expand Up @@ -234,8 +235,9 @@ def get_compose(name):
if loaded_compose.get("networks"):
for network in loaded_compose.get("networks"):
networks.append(network)
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
if loaded_compose.get("services"):
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
_content = open(file)
content = _content.read()
compose_object = {
Expand Down Expand Up @@ -269,6 +271,11 @@ def write_compose(compose):
try:
f.write(compose.content)
f.close()
except TypeError as exc:
if exc.args[0] == "write() argument must be str, not None":
raise HTTPException(
status_code=422, detail="Compose file cannot be empty."
)
except Exception as exc:
raise HTTPException(exc.status_code, exc.detail)

Expand Down
16 changes: 11 additions & 5 deletions backend/api/actions/resources.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
from fastapi import HTTPException

### IMAGES ###


def get_images():
dclient = docker.from_env()
containers = dclient.containers.list(all=True)
Expand Down Expand Up @@ -174,11 +176,15 @@ def get_networks():
raise HTTPException(
status_code=exc.response.status_code, detail=exc.explanation
)
if attrs.get("inUse") == None:
attrs.update({"inUse": False})
if attrs.get("Labels", {}).get("com.docker.compose.project"):
attrs.update({"Project": attrs["Labels"]["com.docker.compose.project"]})
network_list.append(attrs)
if attrs:
if attrs.get("inUse") is None:
attrs.update({"inUse": False})
if attrs.get("Labels", {}):
if attrs.get("Labels", {}).get("com.docker.compose.project"):
attrs.update(
{"Project": attrs["Labels"]["com.docker.compose.project"]}
)
network_list.append(attrs)
return network_list


Expand Down
1 change: 1 addition & 0 deletions backend/api/db/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .models import *
25 changes: 25 additions & 0 deletions backend/api/db/crud/settings.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
from sqlalchemy.orm import Session

from api.db.models import containers as models
from api.db.models.settings import SecretKey
from datetime import datetime
from api.settings import Settings
import json

settings = Settings()


def export_settings(db: Session):
file_export = {}
Expand All @@ -12,6 +16,27 @@ def export_settings(db: Session):
return file_export


def get_secret_key(db: Session):
    """Return True if a secret key row already exists in the database.

    Args:
        db: Active SQLAlchemy session.

    Returns:
        bool: whether a SecretKey row is present.
    """
    # Bug fix: the original queried `models.SecretKey`, but `models` is the
    # containers model module; SecretKey is imported from
    # api.db.models.settings (and generate_secret_key below queries it
    # directly) — query the imported class for consistency.
    return db.query(SecretKey).first() is not None


def generate_secret_key(db: Session):
    """Return the persisted application secret key, creating it on first run.

    If no SecretKey row exists yet, one is created from the current
    settings' SECRET_KEY and committed; otherwise the stored key is reused.

    Args:
        db: Active SQLAlchemy session.

    Returns:
        str: the secret key value.
    """
    existing = db.query(SecretKey).first()
    # Guard clause: reuse the already-persisted key when present.
    if existing is not None:
        print("Secret key exists")
        return existing.key

    new_row = SecretKey(key=settings.SECRET_KEY)
    db.add(new_row)
    db.commit()
    print("Secret key generated")
    return new_row.key


def import_settings(db: Session, upload):
import_file = upload.file.read()
decoded_import = import_file.decode("utf-8")
Expand Down
Loading

0 comments on commit d0253af

Please sign in to comment.