From 2bf19e9f57a8594360fee6bb91c605dc2f54e501 Mon Sep 17 00:00:00 2001 From: Davy Peter Braun <543614+dheavy@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:37:04 +0200 Subject: [PATCH 1/5] Fix 'software' not being linted by ruff --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0d70cf2b..8380e16f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: rev: "v0.2.2" hooks: - id: ruff - args: ["--fix"] + args: ["--fix", "software/"] - id: ruff-format - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 From 403a29f0d6b7dab67c30d5a7805109408ebb6c39 Mon Sep 17 00:00:00 2001 From: Davy Peter Braun <543614+dheavy@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:46:02 +0200 Subject: [PATCH 2/5] Add updated list of linted files --- software/source/clients/base_device.py | 196 ++++++++----- software/source/server/conftest.py | 5 +- software/source/server/i.py | 38 ++- software/source/server/server.py | 268 +++++++++++------- .../server/services/stt/local-whisper/stt.py | 87 +++--- .../source/server/services/tts/openai/tts.py | 42 +-- software/source/server/skills/schedule.py | 24 +- software/source/server/tests/test_run.py | 8 +- software/source/server/tunnel.py | 59 ++-- .../source/server/utils/get_system_info.py | 4 +- software/source/server/utils/local_mode.py | 75 ++--- software/source/server/utils/process_utils.py | 7 +- software/start.py | 102 ++++--- 13 files changed, 532 insertions(+), 383 deletions(-) diff --git a/software/source/clients/base_device.py b/software/source/clients/base_device.py index 43b491fe..3bf900ef 100644 --- a/software/source/clients/base_device.py +++ b/software/source/clients/base_device.py @@ -1,23 +1,18 @@ from dotenv import load_dotenv + load_dotenv() # take environment variables from .env. import os import asyncio import threading -import os import pyaudio -from starlette.websockets import WebSocket -from queue import Queue from pynput import keyboard import json import traceback import websockets import queue -import pydub -import ast from pydub import AudioSegment from pydub.playback import play -import io import time import wave import tempfile @@ -25,7 +20,10 @@ import cv2 import base64 import platform -from interpreter import interpreter # Just for code execution. Maybe we should let people do from interpreter.computer import run? +from interpreter import ( + interpreter, +) # Just for code execution. Maybe we should let people do from interpreter.computer import run? + # In the future, I guess kernel watching code should be elsewhere? Somewhere server / client agnostic? 
from ..server.utils.kernel import put_kernel_messages_into_queue from ..server.utils.get_system_info import get_system_info @@ -33,6 +31,7 @@ from ..server.utils.logs import setup_logging from ..server.utils.logs import logger + setup_logging() os.environ["STT_RUNNER"] = "server" @@ -51,11 +50,11 @@ SPACEBAR_PRESSED = False # Flag to track spacebar press state # Camera configuration -CAMERA_ENABLED = os.getenv('CAMERA_ENABLED', False) +CAMERA_ENABLED = os.getenv("CAMERA_ENABLED", False) if type(CAMERA_ENABLED) == str: - CAMERA_ENABLED = (CAMERA_ENABLED.lower() == "true") -CAMERA_DEVICE_INDEX = int(os.getenv('CAMERA_DEVICE_INDEX', 0)) -CAMERA_WARMUP_SECONDS = float(os.getenv('CAMERA_WARMUP_SECONDS', 0)) + CAMERA_ENABLED = CAMERA_ENABLED.lower() == "true" +CAMERA_DEVICE_INDEX = int(os.getenv("CAMERA_DEVICE_INDEX", 0)) +CAMERA_WARMUP_SECONDS = float(os.getenv("CAMERA_WARMUP_SECONDS", 0)) # Specify OS current_platform = get_system_info() @@ -66,6 +65,7 @@ send_queue = queue.Queue() + class Device: def __init__(self): self.pressed_keys = set() @@ -89,23 +89,28 @@ def fetch_image_from_camera(self, camera_index=CAMERA_DEVICE_INDEX): if ret: temp_dir = tempfile.gettempdir() - image_path = os.path.join(temp_dir, f"01_photo_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.png") + image_path = os.path.join( + temp_dir, f"01_photo_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.png" + ) self.captured_images.append(image_path) cv2.imwrite(image_path, frame) logger.info(f"Camera image captured to {image_path}") - logger.info(f"You now have {len(self.captured_images)} images which will be sent along with your next audio message.") + logger.info( + f"You now have {len(self.captured_images)} images which will be sent along with your next audio message." + ) else: - logger.error(f"Error: Couldn't capture an image from camera ({camera_index})") + logger.error( + f"Error: Couldn't capture an image from camera ({camera_index})" + ) cap.release() return image_path - def encode_image_to_base64(self, image_path): """Encodes an image file to a base64 string.""" with open(image_path, "rb") as image_file: - return base64.b64encode(image_file.read()).decode('utf-8') + return base64.b64encode(image_file.read()).decode("utf-8") def add_image_to_send_queue(self, image_path): """Encodes an image and adds an LMC message to the send queue with the image data.""" @@ -114,7 +119,7 @@ def add_image_to_send_queue(self, image_path): "role": "user", "type": "image", "format": "base64.png", - "content": base64_image + "content": base64_image, } send_queue.put(image_message) # Delete the image file from the file system after sending it @@ -126,7 +131,6 @@ def queue_all_captured_images(self): self.add_image_to_send_queue(image_path) self.captured_images.clear() # Clear the list after sending - async def play_audiosegments(self): """Plays them sequentially.""" while True: @@ -141,27 +145,35 @@ async def play_audiosegments(self): except: logger.info(traceback.format_exc()) - def record_audio(self): - - if os.getenv('STT_RUNNER') == "server": + if os.getenv("STT_RUNNER") == "server": # STT will happen on the server. we're sending audio. - send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "start": True}) - elif os.getenv('STT_RUNNER') == "client": + send_queue.put( + {"role": "user", "type": "audio", "format": "bytes.wav", "start": True} + ) + elif os.getenv("STT_RUNNER") == "client": # STT will happen here, on the client. we're sending text. 
send_queue.put({"role": "user", "type": "message", "start": True}) else: raise Exception("STT_RUNNER must be set to either 'client' or 'server'.") """Record audio from the microphone and add it to the queue.""" - stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) + stream = p.open( + format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK, + ) print("Recording started...") global RECORDING # Create a temporary WAV file to store the audio data temp_dir = tempfile.gettempdir() - wav_path = os.path.join(temp_dir, f"audio_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav") - wav_file = wave.open(wav_path, 'wb') + wav_path = os.path.join( + temp_dir, f"audio_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav" + ) + wav_file = wave.open(wav_path, "wb") wav_file.setnchannels(CHANNELS) wav_file.setsampwidth(p.get_sample_size(FORMAT)) wav_file.setframerate(RATE) @@ -178,17 +190,30 @@ def record_audio(self): duration = wav_file.getnframes() / RATE if duration < 0.3: # Just pressed it. Send stop message - if os.getenv('STT_RUNNER') == "client": + if os.getenv("STT_RUNNER") == "client": send_queue.put({"role": "user", "type": "message", "content": "stop"}) send_queue.put({"role": "user", "type": "message", "end": True}) else: - send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "content": ""}) - send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "end": True}) + send_queue.put( + { + "role": "user", + "type": "audio", + "format": "bytes.wav", + "content": "", + } + ) + send_queue.put( + { + "role": "user", + "type": "audio", + "format": "bytes.wav", + "end": True, + } + ) else: self.queue_all_captured_images() - if os.getenv('STT_RUNNER') == "client": - + if os.getenv("STT_RUNNER") == "client": # THIS DOES NOT WORK. We moved to this very cool stt_service, llm_service # way of doing things. stt_wav is not a thing anymore. Needs work to work @@ -199,12 +224,19 @@ def record_audio(self): send_queue.put({"role": "user", "type": "message", "end": True}) else: # Stream audio - with open(wav_path, 'rb') as audio_file: + with open(wav_path, "rb") as audio_file: byte_data = audio_file.read(CHUNK) while byte_data: send_queue.put(byte_data) byte_data = audio_file.read(CHUNK) - send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "end": True}) + send_queue.put( + { + "role": "user", + "type": "audio", + "format": "bytes.wav", + "end": True, + } + ) if os.path.exists(wav_path): os.remove(wav_path) @@ -227,24 +259,27 @@ def on_press(self, key): if keyboard.Key.space in self.pressed_keys: self.toggle_recording(True) - elif {keyboard.Key.ctrl, keyboard.KeyCode.from_char('c')} <= self.pressed_keys: + elif {keyboard.Key.ctrl, keyboard.KeyCode.from_char("c")} <= self.pressed_keys: logger.info("Ctrl+C pressed. 
Exiting...") kill_process_tree() os._exit(0) def on_release(self, key): """Detect spacebar release and 'c' key press for camera, and handle key release.""" - self.pressed_keys.discard(key) # Remove the released key from the key press tracking set + self.pressed_keys.discard( + key + ) # Remove the released key from the key press tracking set if key == keyboard.Key.space: self.toggle_recording(False) - elif CAMERA_ENABLED and key == keyboard.KeyCode.from_char('c'): + elif CAMERA_ENABLED and key == keyboard.KeyCode.from_char("c"): self.fetch_image_from_camera() - async def message_sender(self, websocket): while True: - message = await asyncio.get_event_loop().run_in_executor(None, send_queue.get) + message = await asyncio.get_event_loop().run_in_executor( + None, send_queue.get + ) if isinstance(message, bytes): await websocket.send(message) else: @@ -257,7 +292,9 @@ async def websocket_communication(self, WS_URL): async def exec_ws_communication(websocket): if CAMERA_ENABLED: - print("\nHold the spacebar to start recording. Press 'c' to capture an image from the camera. Press CTRL-C to exit.") + print( + "\nHold the spacebar to start recording. Press 'c' to capture an image from the camera. Press CTRL-C to exit." + ) else: print("\nHold the spacebar to start recording. Press CTRL-C to exit.") @@ -280,7 +317,6 @@ async def exec_ws_communication(websocket): # At this point, we have our message if message["type"] == "audio" and message["format"].startswith("bytes"): - # Convert bytes to audio file audio_bytes = message["content"] @@ -294,13 +330,13 @@ async def exec_ws_communication(websocket): # 16,000 Hz frame rate frame_rate=16000, # mono sound - channels=1 + channels=1, ) self.audiosegments.append(audio) # Run the code if that's the client's job - if os.getenv('CODE_RUNNER') == "client": + if os.getenv("CODE_RUNNER") == "client": if message["type"] == "code" and "end" in message: language = message["format"] code = message["content"] @@ -308,7 +344,7 @@ async def exec_ws_communication(websocket): send_queue.put(result) if is_win10(): - logger.info('Windows 10 detected') + logger.info("Windows 10 detected") # Workaround for Windows 10 not latching to the websocket server. 
# See https://github.com/OpenInterpreter/01/issues/197 try: @@ -329,42 +365,46 @@ async def exec_ws_communication(websocket): await asyncio.sleep(2) async def start_async(self): - # Configuration for WebSocket - WS_URL = f"ws://{self.server_url}" - # Start the WebSocket communication - asyncio.create_task(self.websocket_communication(WS_URL)) - - # Start watching the kernel if it's your job to do that - if os.getenv('CODE_RUNNER') == "client": - asyncio.create_task(put_kernel_messages_into_queue(send_queue)) - - asyncio.create_task(self.play_audiosegments()) - - # If Raspberry Pi, add the button listener, otherwise use the spacebar - if current_platform.startswith("raspberry-pi"): - logger.info("Raspberry Pi detected, using button on GPIO pin 15") - # Use GPIO pin 15 - pindef = ["gpiochip4", "15"] # gpiofind PIN15 - print("PINDEF", pindef) - - # HACK: needs passwordless sudo - process = await asyncio.create_subprocess_exec("sudo", "gpiomon", "-brf", *pindef, stdout=asyncio.subprocess.PIPE) - while True: - line = await process.stdout.readline() - if line: - line = line.decode().strip() - if "FALLING" in line: - self.toggle_recording(False) - elif "RISING" in line: - self.toggle_recording(True) - else: - break - else: - # Keyboard listener for spacebar press/release - listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release) - listener.start() + # Configuration for WebSocket + WS_URL = f"ws://{self.server_url}" + # Start the WebSocket communication + asyncio.create_task(self.websocket_communication(WS_URL)) + + # Start watching the kernel if it's your job to do that + if os.getenv("CODE_RUNNER") == "client": + asyncio.create_task(put_kernel_messages_into_queue(send_queue)) + + asyncio.create_task(self.play_audiosegments()) + + # If Raspberry Pi, add the button listener, otherwise use the spacebar + if current_platform.startswith("raspberry-pi"): + logger.info("Raspberry Pi detected, using button on GPIO pin 15") + # Use GPIO pin 15 + pindef = ["gpiochip4", "15"] # gpiofind PIN15 + print("PINDEF", pindef) + + # HACK: needs passwordless sudo + process = await asyncio.create_subprocess_exec( + "sudo", "gpiomon", "-brf", *pindef, stdout=asyncio.subprocess.PIPE + ) + while True: + line = await process.stdout.readline() + if line: + line = line.decode().strip() + if "FALLING" in line: + self.toggle_recording(False) + elif "RISING" in line: + self.toggle_recording(True) + else: + break + else: + # Keyboard listener for spacebar press/release + listener = keyboard.Listener( + on_press=self.on_press, on_release=self.on_release + ) + listener.start() def start(self): - if os.getenv('TEACH_MODE') != "True": + if os.getenv("TEACH_MODE") != "True": asyncio.run(self.start_async()) p.terminate() diff --git a/software/source/server/conftest.py b/software/source/server/conftest.py index 46841945..b6172316 100644 --- a/software/source/server/conftest.py +++ b/software/source/server/conftest.py @@ -1,8 +1,5 @@ -import os -import sys import pytest from source.server.i import configure_interpreter -from unittest.mock import Mock from interpreter import OpenInterpreter from fastapi.testclient import TestClient from .server import app @@ -16,4 +13,4 @@ def client(): @pytest.fixture def mock_interpreter(): interpreter = configure_interpreter(OpenInterpreter()) - return interpreter \ No newline at end of file + return interpreter diff --git a/software/source/server/i.py b/software/source/server/i.py index 89deb5e8..bc792fd4 100644 --- a/software/source/server/i.py +++ 
b/software/source/server/i.py @@ -1,11 +1,11 @@ from dotenv import load_dotenv +import os + load_dotenv() # take environment variables from .env. -import os import glob import time import json -from pathlib import Path from interpreter import OpenInterpreter import shutil @@ -47,7 +47,7 @@ When the user completes the current task, you should remove it from the list and read the next item by running `tasks = tasks[1:]\ntasks[0]`. Then, tell the user what the next task is. When the user tells you about a set of tasks, you should intelligently order tasks, batch similar tasks, and break down large tasks into smaller tasks (for this, you should consult the user and get their permission to break it down). Your goal is to manage the task list as intelligently as possible, to make the user as efficient and non-overwhelmed as possible. They will require a lot of encouragement, support, and kindness. Don't say too much about what's ahead of them— just try to focus them on each step at a time. -After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. +After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. To do this, schedule a reminder based on estimated completion time using the function `schedule(message="Your message here.", start="8am")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at the time you scheduled it. If the user says to monitor something, simply schedule it with an interval of a duration that makes sense for the problem by specifying an interval, like this: `schedule(message="Your message here.", interval="5m")` @@ -182,7 +182,6 @@ def get_function_info(file_path): def configure_interpreter(interpreter: OpenInterpreter): - ### SYSTEM MESSAGE interpreter.system_message = system_message @@ -205,7 +204,6 @@ def configure_interpreter(interpreter: OpenInterpreter): "Please provide more information.", ] - # Check if required packages are installed # THERE IS AN INCONSISTENCY HERE. @@ -259,7 +257,6 @@ def configure_interpreter(interpreter: OpenInterpreter): time.sleep(2) print("Attempting to start OS control anyway...\n\n") - # Should we explore other options for ^ these kinds of tags? 
# Like: @@ -295,12 +292,8 @@ def configure_interpreter(interpreter: OpenInterpreter): # if chunk.get("format") != "active_line": # print(chunk.get("content")) - import os - from platformdirs import user_data_dir - - # Directory paths repo_skills_dir = os.path.join(os.path.dirname(__file__), "skills") user_data_skills_dir = os.path.join(user_data_dir("01"), "skills") @@ -314,22 +307,21 @@ def configure_interpreter(interpreter: OpenInterpreter): src_file = os.path.join(repo_skills_dir, filename) dst_file = os.path.join(user_data_skills_dir, filename) shutil.copy2(src_file, dst_file) - + interpreter.computer.debug = True interpreter.computer.skills.path = user_data_skills_dir - + # Import skills interpreter.computer.save_skills = False - + for file in glob.glob(os.path.join(interpreter.computer.skills.path, "*.py")): code_to_run = "" with open(file, "r") as f: code_to_run += f.read() + "\n" interpreter.computer.run("python", code_to_run) - - interpreter.computer.save_skills = True + interpreter.computer.save_skills = True # Initialize user's task list interpreter.computer.run( @@ -354,17 +346,21 @@ def configure_interpreter(interpreter: OpenInterpreter): ### MISC SETTINGS interpreter.auto_run = True - interpreter.computer.languages = [l for l in interpreter.computer.languages if l.name.lower() in ["applescript", "shell", "zsh", "bash", "python"]] + interpreter.computer.languages = [ + l + for l in interpreter.computer.languages + if l.name.lower() in ["applescript", "shell", "zsh", "bash", "python"] + ] interpreter.force_task_completion = True # interpreter.offline = True - interpreter.id = 206 # Used to identify itself to other interpreters. This should be changed programmatically so it's unique. + interpreter.id = 206 # Used to identify itself to other interpreters. This should be changed programmatically so it's unique. ### RESET conversations/user.json - app_dir = user_data_dir('01') - conversations_dir = os.path.join(app_dir, 'conversations') + app_dir = user_data_dir("01") + conversations_dir = os.path.join(app_dir, "conversations") os.makedirs(conversations_dir, exist_ok=True) - user_json_path = os.path.join(conversations_dir, 'user.json') - with open(user_json_path, 'w') as file: + user_json_path = os.path.join(conversations_dir, "user.json") + with open(user_json_path, "w") as file: json.dump([], file) return interpreter diff --git a/software/source/server/server.py b/software/source/server/server.py index f4686348..3f3141ad 100644 --- a/software/source/server/server.py +++ b/software/source/server/server.py @@ -1,9 +1,9 @@ from dotenv import load_dotenv + load_dotenv() # take environment variables from .env. 
import traceback from platformdirs import user_data_dir -import ast import json import queue import os @@ -13,9 +13,7 @@ from fastapi import FastAPI, Request from fastapi.responses import PlainTextResponse from starlette.websockets import WebSocket, WebSocketDisconnect -from pathlib import Path import asyncio -import urllib.parse from .utils.kernel import put_kernel_messages_into_queue from .i import configure_interpreter from interpreter import interpreter @@ -44,28 +42,29 @@ app = FastAPI() -app_dir = user_data_dir('01') -conversation_history_path = os.path.join(app_dir, 'conversations', 'user.json') +app_dir = user_data_dir("01") +conversation_history_path = os.path.join(app_dir, "conversations", "user.json") -SERVER_LOCAL_PORT = int(os.getenv('SERVER_LOCAL_PORT', 10001)) +SERVER_LOCAL_PORT = int(os.getenv("SERVER_LOCAL_PORT", 10001)) # This is so we only say() full sentences def is_full_sentence(text): - return text.endswith(('.', '!', '?')) + return text.endswith((".", "!", "?")) + def split_into_sentences(text): - return re.split(r'(?<=[.!?])\s+', text) + return re.split(r"(?<=[.!?])\s+", text) + # Queues -from_computer = queue.Queue() # Just for computer messages from the device. Sync queue because interpreter.run is synchronous -from_user = asyncio.Queue() # Just for user messages from the device. -to_device = asyncio.Queue() # For messages we send. +from_computer = queue.Queue() # Just for computer messages from the device. Sync queue because interpreter.run is synchronous +from_user = asyncio.Queue() # Just for user messages from the device. +to_device = asyncio.Queue() # For messages we send. # Switch code executor to device if that's set -if os.getenv('CODE_RUNNER') == "device": - +if os.getenv("CODE_RUNNER") == "device": # (This should probably just loop through all languages and apply these changes instead) class Python: @@ -79,14 +78,33 @@ def run(self, code): """Generator that yields a dictionary in LMC Format.""" # Prepare the data - message = {"role": "assistant", "type": "code", "format": "python", "content": code} + message = { + "role": "assistant", + "type": "code", + "format": "python", + "content": code, + } # Unless it was just sent to the device, send it wrapped in flags if not (interpreter.messages and interpreter.messages[-1] == message): - to_device.put({"role": "assistant", "type": "code", "format": "python", "start": True}) + to_device.put( + { + "role": "assistant", + "type": "code", + "format": "python", + "start": True, + } + ) to_device.put(message) - to_device.put({"role": "assistant", "type": "code", "format": "python", "end": True}) - + to_device.put( + { + "role": "assistant", + "type": "code", + "format": "python", + "end": True, + } + ) + # Stream the response logger.info("Waiting for the device to respond...") while True: @@ -109,10 +127,12 @@ def terminate(self): # Configure interpreter interpreter = configure_interpreter(interpreter) + @app.get("/ping") async def ping(): return PlainTextResponse("pong") + @app.websocket("/") async def websocket_endpoint(websocket: WebSocket): await websocket.accept() @@ -145,19 +165,21 @@ async def receive_messages(websocket: WebSocket): except Exception as e: print(str(e)) return - if 'text' in data: + if "text" in data: try: - data = json.loads(data['text']) + data = json.loads(data["text"]) if data["role"] == "computer": - from_computer.put(data) # To be handled by interpreter.computer.run + from_computer.put( + data + ) # To be handled by interpreter.computer.run elif data["role"] == "user": await 
from_user.put(data) else: - raise("Unknown role:", data) + raise ("Unknown role:", data) except json.JSONDecodeError: pass # data is not JSON, leave it as is - elif 'bytes' in data: - data = data['bytes'] # binary data + elif "bytes" in data: + data = data["bytes"] # binary data await from_user.put(data) except WebSocketDisconnect as e: if e.code == 1000: @@ -165,13 +187,13 @@ async def receive_messages(websocket: WebSocket): return else: raise - + async def send_messages(websocket: WebSocket): while True: message = await to_device.get() - #print(f"Sending to the device: {type(message)} {str(message)[:100]}") - + # print(f"Sending to the device: {type(message)} {str(message)[:100]}") + try: if isinstance(message, dict): await websocket.send_json(message) @@ -184,8 +206,8 @@ async def send_messages(websocket: WebSocket): await to_device.put(message) raise -async def listener(): +async def listener(): while True: try: while True: @@ -197,8 +219,6 @@ async def listener(): break await asyncio.sleep(1) - - message = accumulator.accumulate(chunk) if message == None: # Will be None until we have a full message ready @@ -209,8 +229,11 @@ async def listener(): # At this point, we have our message if message["type"] == "audio" and message["format"].startswith("bytes"): - - if "content" not in message or message["content"] == None or message["content"] == "": # If it was nothing / silence / empty + if ( + "content" not in message + or message["content"] == None + or message["content"] == "" + ): # If it was nothing / silence / empty continue # Convert bytes to audio file @@ -222,6 +245,7 @@ async def listener(): if False: os.system(f"open {audio_file_path}") import time + time.sleep(15) text = stt(audio_file_path) @@ -239,21 +263,21 @@ async def listener(): continue # Load, append, and save conversation history - with open(conversation_history_path, 'r') as file: + with open(conversation_history_path, "r") as file: messages = json.load(file) messages.append(message) - with open(conversation_history_path, 'w') as file: + with open(conversation_history_path, "w") as file: json.dump(messages, file, indent=4) accumulated_text = "" - - if any([m["type"] == "image" for m in messages]) and interpreter.llm.model.startswith("gpt-"): + if any( + [m["type"] == "image" for m in messages] + ) and interpreter.llm.model.startswith("gpt-"): interpreter.llm.model = "gpt-4-vision-preview" interpreter.llm.supports_vision = True - - for chunk in interpreter.chat(messages, stream=True, display=True): + for chunk in interpreter.chat(messages, stream=True, display=True): if any([m["type"] == "image" for m in interpreter.messages]): interpreter.llm.model = "gpt-4-vision-preview" @@ -263,18 +287,24 @@ async def listener(): await to_device.put(chunk) # Yield to the event loop, so you actually send it out await asyncio.sleep(0.01) - - if os.getenv('TTS_RUNNER') == "server": + + if os.getenv("TTS_RUNNER") == "server": # Speak full sentences out loud - if chunk["role"] == "assistant" and "content" in chunk and chunk["type"] == "message": + if ( + chunk["role"] == "assistant" + and "content" in chunk + and chunk["type"] == "message" + ): accumulated_text += chunk["content"] sentences = split_into_sentences(accumulated_text) - + # If we're going to speak, say we're going to stop sending text. # This should be fixed probably, we should be able to do both in parallel, or only one. 
if any(is_full_sentence(sentence) for sentence in sentences): - await to_device.put({"role": "assistant", "type": "message", "end": True}) - + await to_device.put( + {"role": "assistant", "type": "message", "end": True} + ) + if is_full_sentence(sentences[-1]): for sentence in sentences: await stream_tts_to_device(sentence) @@ -287,32 +317,36 @@ async def listener(): # If we're going to speak, say we're going to stop sending text. # This should be fixed probably, we should be able to do both in parallel, or only one. if any(is_full_sentence(sentence) for sentence in sentences): - await to_device.put({"role": "assistant", "type": "message", "start": True}) - + await to_device.put( + {"role": "assistant", "type": "message", "start": True} + ) + # If we have a new message, save our progress and go back to the top if not from_user.empty(): - # Check if it's just an end flag. We ignore those. temp_message = await from_user.get() - - if type(temp_message) is dict and temp_message.get("role") == "user" and temp_message.get("end"): + + if ( + type(temp_message) is dict + and temp_message.get("role") == "user" + and temp_message.get("end") + ): # Yup. False alarm. continue else: # Whoops! Put that back await from_user.put(temp_message) - with open(conversation_history_path, 'w') as file: + with open(conversation_history_path, "w") as file: json.dump(interpreter.messages, file, indent=4) # TODO: is triggering seemingly randomly - #logger.info("New user message recieved. Breaking.") - #break + # logger.info("New user message recieved. Breaking.") + # break # Also check if there's any new computer messages if not from_computer.empty(): - - with open(conversation_history_path, 'w') as file: + with open(conversation_history_path, "w") as file: json.dump(interpreter.messages, file, indent=4) logger.info("New computer message recieved. 
Breaking.") @@ -320,6 +354,7 @@ async def listener(): except: traceback.print_exc() + async def stream_tts_to_device(sentence): force_task_completion_responses = [ "the task is done", @@ -332,8 +367,8 @@ async def stream_tts_to_device(sentence): for chunk in stream_tts(sentence): await to_device.put(chunk) + def stream_tts(sentence): - audio_file = tts(sentence) with open(audio_file, "rb") as f: @@ -346,85 +381,106 @@ def stream_tts(sentence): # Stream the audio yield {"role": "assistant", "type": "audio", "format": file_type, "start": True} for i in range(0, len(audio_bytes), chunk_size): - chunk = audio_bytes[i:i+chunk_size] + chunk = audio_bytes[i : i + chunk_size] yield chunk yield {"role": "assistant", "type": "audio", "format": file_type, "end": True} + from uvicorn import Config, Server import os -import platform from importlib import import_module # these will be overwritten -HOST = '' +HOST = "" PORT = 0 + @app.on_event("startup") async def startup_event(): server_url = f"{HOST}:{PORT}" print("") - print_markdown(f"\n*Ready.*\n") + print_markdown("\n*Ready.*\n") print("") + @app.on_event("shutdown") async def shutdown_event(): print_markdown("*Server is shutting down*") -async def main(server_host, server_port, llm_service, model, llm_supports_vision, llm_supports_functions, context_window, max_tokens, temperature, tts_service, stt_service): - - global HOST - global PORT - PORT = server_port - HOST = server_host - - # Setup services - application_directory = user_data_dir('01') - services_directory = os.path.join(application_directory, 'services') - service_dict = {'llm': llm_service, 'tts': tts_service, 'stt': stt_service} - - # Create a temp file with the session number - session_file_path = os.path.join(user_data_dir('01'), '01-session.txt') - with open(session_file_path, 'w') as session_file: - session_id = int(datetime.datetime.now().timestamp() * 1000) - session_file.write(str(session_id)) - - for service in service_dict: - - service_directory = os.path.join(services_directory, service, service_dict[service]) - - # This is the folder they can mess around in - config = {"service_directory": service_directory} - - if service == "llm": - config.update({ +async def main( + server_host, + server_port, + llm_service, + model, + llm_supports_vision, + llm_supports_functions, + context_window, + max_tokens, + temperature, + tts_service, + stt_service, +): + global HOST + global PORT + PORT = server_port + HOST = server_host + + # Setup services + application_directory = user_data_dir("01") + services_directory = os.path.join(application_directory, "services") + + service_dict = {"llm": llm_service, "tts": tts_service, "stt": stt_service} + + # Create a temp file with the session number + session_file_path = os.path.join(user_data_dir("01"), "01-session.txt") + with open(session_file_path, "w") as session_file: + session_id = int(datetime.datetime.now().timestamp() * 1000) + session_file.write(str(session_id)) + + for service in service_dict: + service_directory = os.path.join( + services_directory, service, service_dict[service] + ) + + # This is the folder they can mess around in + config = {"service_directory": service_directory} + + if service == "llm": + config.update( + { "interpreter": interpreter, "model": model, "llm_supports_vision": llm_supports_vision, "llm_supports_functions": llm_supports_functions, "context_window": context_window, "max_tokens": max_tokens, - "temperature": temperature - }) - - module = 
import_module(f'.server.services.{service}.{service_dict[service]}.{service}', package='source') - - ServiceClass = getattr(module, service.capitalize()) - service_instance = ServiceClass(config) - globals()[service] = getattr(service_instance, service) - - interpreter.llm.completions = llm - - # Start listening - asyncio.create_task(listener()) - - # Start watching the kernel if it's your job to do that - if True: # in the future, code can run on device. for now, just server. - asyncio.create_task(put_kernel_messages_into_queue(from_computer)) - - config = Config(app, host=server_host, port=int(server_port), lifespan='on') - server = Server(config) - await server.serve() + "temperature": temperature, + } + ) + + module = import_module( + f".server.services.{service}.{service_dict[service]}.{service}", + package="source", + ) + + ServiceClass = getattr(module, service.capitalize()) + service_instance = ServiceClass(config) + globals()[service] = getattr(service_instance, service) + + interpreter.llm.completions = llm + + # Start listening + asyncio.create_task(listener()) + + # Start watching the kernel if it's your job to do that + if True: # in the future, code can run on device. for now, just server. + asyncio.create_task(put_kernel_messages_into_queue(from_computer)) + + config = Config(app, host=server_host, port=int(server_port), lifespan="on") + server = Server(config) + await server.serve() + # Run the FastAPI app if __name__ == "__main__": diff --git a/software/source/server/services/stt/local-whisper/stt.py b/software/source/server/services/stt/local-whisper/stt.py index cb53953b..1c2743b2 100644 --- a/software/source/server/services/stt/local-whisper/stt.py +++ b/software/source/server/services/stt/local-whisper/stt.py @@ -10,9 +10,6 @@ import ffmpeg import subprocess -import os -import subprocess -import platform import urllib.request @@ -26,7 +23,6 @@ def stt(self, audio_file_path): def install(service_dir): - ### INSTALL WHISPER_RUST_PATH = os.path.join(service_dir, "whisper-rust") @@ -41,29 +37,38 @@ def install(service_dir): os.chdir(WHISPER_RUST_PATH) # Check if whisper-rust executable exists before attempting to build - if not os.path.isfile(os.path.join(WHISPER_RUST_PATH, "target/release/whisper-rust")): + if not os.path.isfile( + os.path.join(WHISPER_RUST_PATH, "target/release/whisper-rust") + ): # Check if Rust is installed. Needed to build whisper executable - + rustc_path = shutil.which("rustc") - + if rustc_path is None: - print("Rust is not installed or is not in system PATH. Please install Rust before proceeding.") + print( + "Rust is not installed or is not in system PATH. Please install Rust before proceeding." + ) exit(1) # Build Whisper Rust executable if not found - subprocess.run(['cargo', 'build', '--release'], check=True) + subprocess.run(["cargo", "build", "--release"], check=True) else: print("Whisper Rust executable already exists. 
Skipping build.") WHISPER_MODEL_PATH = os.path.join(service_dir, "model") - WHISPER_MODEL_NAME = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin') - WHISPER_MODEL_URL = os.getenv('WHISPER_MODEL_URL', 'https://huggingface.co/ggerganov/whisper.cpp/resolve/main/') + WHISPER_MODEL_NAME = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin") + WHISPER_MODEL_URL = os.getenv( + "WHISPER_MODEL_URL", + "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/", + ) if not os.path.isfile(os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME)): os.makedirs(WHISPER_MODEL_PATH, exist_ok=True) - urllib.request.urlretrieve(f"{WHISPER_MODEL_URL}{WHISPER_MODEL_NAME}", - os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME)) + urllib.request.urlretrieve( + f"{WHISPER_MODEL_URL}{WHISPER_MODEL_NAME}", + os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME), + ) else: print("Whisper model already exists. Skipping download.") @@ -85,25 +90,31 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str: # Create a temporary file with the appropriate extension input_ext = convert_mime_type_to_format(mime_type) - input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}") - with open(input_path, 'wb') as f: + input_path = os.path.join( + temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}" + ) + with open(input_path, "wb") as f: f.write(audio) # Check if the input file exists assert os.path.exists(input_path), f"Input file does not exist: {input_path}" # Export to wav - output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav") + output_path = os.path.join( + temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav" + ) print(mime_type, input_path, output_path) if mime_type == "audio/raw": ffmpeg.input( input_path, - f='s16le', - ar='16000', + f="s16le", + ar="16000", ac=1, - ).output(output_path, loglevel='panic').run() + ).output(output_path, loglevel="panic").run() else: - ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run() + ffmpeg.input(input_path).output( + output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic" + ).run() try: yield output_path @@ -113,28 +124,40 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str: def run_command(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) return result.stdout, result.stderr def get_transcription_file(service_directory, wav_file_path: str): - local_path = os.path.join(service_directory, 'model') - whisper_rust_path = os.path.join(service_directory, 'whisper-rust', 'target', 'release') - model_name = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin') - - output, _ = run_command([ - os.path.join(whisper_rust_path, 'whisper-rust'), - '--model-path', os.path.join(local_path, model_name), - '--file-path', wav_file_path - ]) + local_path = os.path.join(service_directory, "model") + whisper_rust_path = os.path.join( + service_directory, "whisper-rust", "target", "release" + ) + model_name = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin") + + output, _ = run_command( + [ + os.path.join(whisper_rust_path, "whisper-rust"), + "--model-path", + os.path.join(local_path, model_name), + "--file-path", + wav_file_path, + ] + ) return output def stt_wav(service_directory, wav_file_path: str): temp_dir = 
tempfile.gettempdir() - output_path = os.path.join(temp_dir, f"output_stt_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav") - ffmpeg.input(wav_file_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k').run() + output_path = os.path.join( + temp_dir, f"output_stt_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav" + ) + ffmpeg.input(wav_file_path).output( + output_path, acodec="pcm_s16le", ac=1, ar="16k" + ).run() try: transcript = get_transcription_file(service_directory, output_path) finally: diff --git a/software/source/server/services/tts/openai/tts.py b/software/source/server/services/tts/openai/tts.py index 8b15b229..07e1eec5 100644 --- a/software/source/server/services/tts/openai/tts.py +++ b/software/source/server/services/tts/openai/tts.py @@ -2,41 +2,43 @@ import tempfile from openai import OpenAI import os -import subprocess -import tempfile from source.server.utils.logs import logger from source.server.utils.logs import setup_logging + setup_logging() # If this TTS service is used, the OPENAI_API_KEY environment variable must be set -if not os.getenv('OPENAI_API_KEY'): +if not os.getenv("OPENAI_API_KEY"): logger.error("") - logger.error(f"OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, or run 01 with the --local option.") + logger.error( + "OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, or run 01 with the --local option." + ) logger.error("Aborting...") logger.error("") os._exit(1) client = OpenAI() + class Tts: def __init__(self, config): pass def tts(self, text): - response = client.audio.speech.create( - model="tts-1", - voice=os.getenv('OPENAI_VOICE_NAME', 'alloy'), - input=text, - response_format="opus" - ) - with tempfile.NamedTemporaryFile(suffix=".opus", delete=False) as temp_file: - response.stream_to_file(temp_file.name) - - # TODO: hack to format audio correctly for device - outfile = tempfile.gettempdir() + "/" + "raw.dat" - ffmpeg.input(temp_file.name).output(outfile, f="s16le", ar="16000", ac="1", loglevel='panic').run() - - return outfile - - + response = client.audio.speech.create( + model="tts-1", + voice=os.getenv("OPENAI_VOICE_NAME", "alloy"), + input=text, + response_format="opus", + ) + with tempfile.NamedTemporaryFile(suffix=".opus", delete=False) as temp_file: + response.stream_to_file(temp_file.name) + + # TODO: hack to format audio correctly for device + outfile = tempfile.gettempdir() + "/" + "raw.dat" + ffmpeg.input(temp_file.name).output( + outfile, f="s16le", ar="16000", ac="1", loglevel="panic" + ).run() + + return outfile diff --git a/software/source/server/skills/schedule.py b/software/source/server/skills/schedule.py index f351c592..ef65d726 100644 --- a/software/source/server/skills/schedule.py +++ b/software/source/server/skills/schedule.py @@ -3,34 +3,33 @@ from pytimeparse import parse from crontab import CronTab from uuid import uuid4 -from datetime import datetime from platformdirs import user_data_dir + def schedule(message="", start=None, interval=None) -> None: """ Schedules a task at a particular time, or at a particular interval """ if start and interval: raise ValueError("Cannot specify both start time and interval.") - + if not start and not interval: raise ValueError("Either start time or interval must be specified.") - + # Read the temp file to see what the current session is - session_file_path = os.path.join(user_data_dir('01'), '01-session.txt') - - with open(session_file_path, 'r') as session_file: - file_session_value = session_file.read().strip() + 
session_file_path = os.path.join(user_data_dir("01"), "01-session.txt") + with open(session_file_path, "r") as session_file: + file_session_value = session_file.read().strip() prefixed_message = "AUTOMATED MESSAGE FROM SCHEDULER: " + message - + # Escape the message and the json, cron is funky with quotes escaped_question = prefixed_message.replace('"', '\\"') - json_data = f"{{\\\"text\\\": \\\"{escaped_question}\\\"}}" + json_data = f'{{\\"text\\": \\"{escaped_question}\\"}}' + + command = f"""bash -c 'if [ "$(cat "{session_file_path}")" == "{file_session_value}" ]; then /usr/bin/curl -X POST -H "Content-Type: application/json" -d "{json_data}" http://localhost:10001/; fi' """ - command = f'''bash -c 'if [ "$(cat "{session_file_path}")" == "{file_session_value}" ]; then /usr/bin/curl -X POST -H "Content-Type: application/json" -d "{json_data}" http://localhost:10001/; fi' ''' - cron = CronTab(user=True) job = cron.new(command=command) # Prefix with 01 dev preview so we can delete them all in the future @@ -61,6 +60,5 @@ def schedule(message="", start=None, interval=None) -> None: days = max(int(seconds / 86400), 1) job.day.every(days) print(f"Task scheduled every {days} day(s)") - - cron.write() + cron.write() diff --git a/software/source/server/tests/test_run.py b/software/source/server/tests/test_run.py index ce04932a..b9cc7fd6 100644 --- a/software/source/server/tests/test_run.py +++ b/software/source/server/tests/test_run.py @@ -1,11 +1,5 @@ # test_main.py -import subprocess -import uuid import pytest -from source.server.i import configure_interpreter -from unittest.mock import Mock -from fastapi.testclient import TestClient - @pytest.mark.asyncio @@ -38,4 +32,4 @@ def test_ping(client): # def test_interpreter_configuration(mock_interpreter): # # Test interpreter configuration # interpreter = configure_interpreter(mock_interpreter) -# assert interpreter is not None \ No newline at end of file +# assert interpreter is not None diff --git a/software/source/server/tunnel.py b/software/source/server/tunnel.py index 7181408f..6d6acb01 100644 --- a/software/source/server/tunnel.py +++ b/software/source/server/tunnel.py @@ -1,4 +1,3 @@ -import os import subprocess import re import shutil @@ -12,16 +11,24 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 server_url = "" if tunnel_method == "bore": try: - output = subprocess.check_output('command -v bore', shell=True) + output = subprocess.check_output("command -v bore", shell=True) except subprocess.CalledProcessError: - print("The bore-cli command is not available. Please run 'cargo install bore-cli'.") + print( + "The bore-cli command is not available. Please run 'cargo install bore-cli'." 
+ ) print("For more information, see https://github.com/ekzhang/bore") exit(1) time.sleep(6) # output = subprocess.check_output(f'bore local {server_port} --to bore.pub', shell=True) - process = subprocess.Popen(f'bore local {server_port} --to bore.pub', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - + process = subprocess.Popen( + f"bore local {server_port} --to bore.pub", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + while True: line = process.stdout.readline() print(line) @@ -33,20 +40,27 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 print_markdown(f"Your server is being hosted at the following URL: bore.pub:{remote_port}") break - - - elif tunnel_method == "localtunnel": - if subprocess.call('command -v lt', shell=True): + if subprocess.call("command -v lt", shell=True): print("The 'lt' command is not available.") - print("Please ensure you have Node.js installed, then run 'npm install -g localtunnel'.") - print("For more information, see https://github.com/localtunnel/localtunnel") + print( + "Please ensure you have Node.js installed, then run 'npm install -g localtunnel'." + ) + print( + "For more information, see https://github.com/localtunnel/localtunnel" + ) exit(1) else: - process = subprocess.Popen(f'npx localtunnel --port {server_port}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) + process = subprocess.Popen( + f"npx localtunnel --port {server_port}", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) found_url = False - url_pattern = re.compile(r'your url is: https://[a-zA-Z0-9.-]+') + url_pattern = re.compile(r"your url is: https://[a-zA-Z0-9.-]+") while True: line = process.stdout.readline() @@ -61,15 +75,20 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 break # Exit the loop once the URL is found if not found_url: - print("Failed to extract the localtunnel URL. Please check localtunnel's output for details.") + print( + "Failed to extract the localtunnel URL. Please check localtunnel's output for details." 
+ ) elif tunnel_method == "ngrok": - # Check if ngrok is installed - is_installed = subprocess.check_output('command -v ngrok', shell=True).decode().strip() + is_installed = ( + subprocess.check_output("command -v ngrok", shell=True).decode().strip() + ) if not is_installed: print("The ngrok command is not available.") - print("Please install ngrok using the instructions at https://ngrok.com/docs/getting-started/") + print( + "Please install ngrok using the instructions at https://ngrok.com/docs/getting-started/" + ) exit(1) # If ngrok is installed, start it on the specified port @@ -79,11 +98,11 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 # Initially, no URL is found found_url = False # Regular expression to match the ngrok URL - url_pattern = re.compile(r'https://[a-zA-Z0-9-]+\.ngrok(-free)?\.app') + url_pattern = re.compile(r"https://[a-zA-Z0-9-]+\.ngrok(-free)?\.app") # Read the output line by line while True: - line = process.stdout.readline().decode('utf-8') + line = process.stdout.readline().decode("utf-8") if not line: break # Break out of the loop if no more output match = url_pattern.search(line) @@ -93,7 +112,7 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 server_url = remote_url print(f"\nYour server is being hosted at the following URL: {remote_url}") break # Exit the loop once the URL is found - + if not found_url: print("Failed to extract the ngrok tunnel URL. Please check ngrok's output for details.") diff --git a/software/source/server/utils/get_system_info.py b/software/source/server/utils/get_system_info.py index 8989d444..1dd69194 100644 --- a/software/source/server/utils/get_system_info.py +++ b/software/source/server/utils/get_system_info.py @@ -1,6 +1,6 @@ -import os import platform + def get_system_info(): system = platform.system() if system == "Linux": @@ -38,4 +38,4 @@ def get_system_info(): elif system == "Windows": return "windows" else: - return "unknown" \ No newline at end of file + return "unknown" diff --git a/software/source/server/utils/local_mode.py b/software/source/server/utils/local_mode.py index 6d7113c9..ec5f436a 100644 --- a/software/source/server/utils/local_mode.py +++ b/software/source/server/utils/local_mode.py @@ -1,6 +1,4 @@ import sys -import os -import platform import subprocess import time import inquirer @@ -8,9 +6,10 @@ def select_local_model(): - # START OF LOCAL MODEL PROVIDER LOGIC - interpreter.display_message("> 01 is compatible with several local model providers.\n") + interpreter.display_message( + "> 01 is compatible with several local model providers.\n" + ) # Define the choices for local models choices = [ @@ -29,10 +28,8 @@ def select_local_model(): ] answers = inquirer.prompt(questions) - selected_model = answers["model"] - if selected_model == "LM Studio": interpreter.display_message( """ @@ -49,7 +46,7 @@ def select_local_model(): """ ) time.sleep(1) - + interpreter.llm.api_base = "http://localhost:1234/v1" interpreter.llm.max_tokens = 1000 interpreter.llm.context_window = 8000 @@ -57,47 +54,64 @@ def select_local_model(): elif selected_model == "Ollama": try: - # List out all downloaded ollama models. 
Will fail if ollama isn't installed - result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True) - lines = result.stdout.split('\n') - names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()] # Extract names, trim out ":latest", skip header - + result = subprocess.run( + ["ollama", "list"], capture_output=True, text=True, check=True + ) + lines = result.stdout.split("\n") + names = [ + line.split()[0].replace(":latest", "") + for line in lines[1:] + if line.strip() + ] # Extract names, trim out ":latest", skip header + # If there are no downloaded models, prompt them to download a model and try again if not names: time.sleep(1) - - interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run `, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n") - + + interpreter.display_message( + "\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run `, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n" + ) + print("Please download a model then try again\n") time.sleep(2) sys.exit(1) - + # If there are models, prompt them to select one else: time.sleep(1) - interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run `, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n") + interpreter.display_message( + f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run `, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n" + ) # Create a new inquirer selection from the names name_question = [ - inquirer.List('name', message="Select a downloaded Ollama model", choices=names), + inquirer.List( + "name", + message="Select a downloaded Ollama model", + choices=names, + ), ] name_answer = inquirer.prompt(name_question) - selected_name = name_answer['name'] if name_answer else None - + selected_name = name_answer["name"] if name_answer else None + # Set the model to the selected model interpreter.llm.model = f"ollama/{selected_name}" - interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n") + interpreter.display_message( + f"\nUsing Ollama model: `{selected_name}` \n" + ) time.sleep(1) - + # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again - except (subprocess.CalledProcessError, FileNotFoundError) as e: + except (subprocess.CalledProcessError, FileNotFoundError): print("Ollama is not installed or not recognized as a command.") time.sleep(1) - interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n") + interpreter.display_message( + "\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n" + ) time.sleep(2) sys.exit(1) - + # elif selected_model == "Jan": # interpreter.display_message( # """ @@ -108,7 +122,6 @@ def select_local_model(): # 3. Copy the ID of the model and enter it below. # 3. Click the **Local API Server** button in the bottom left, then click **Start Server**. 
- # Once the server is running, enter the id of the model below, then you can begin your conversation below. # """ @@ -117,7 +130,7 @@ def select_local_model(): # interpreter.llm.max_tokens = 1000 # interpreter.llm.context_window = 3000 # time.sleep(1) - + # # Prompt the user to enter the name of the model running on Jan # model_name_question = [ # inquirer.Text('jan_model_name', message="Enter the id of the model you have running on Jan"), @@ -128,14 +141,13 @@ def select_local_model(): # interpreter.llm.model = "" # interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n") # time.sleep(1) - # Set the system message to a minimal version for all local models. # Set offline for all local models interpreter.offline = True - interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission. - + interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission. + Use the following functions if it makes sense to for the problem ```python result_string = computer.browser.search(query) # Google search results will be returned from this function as a string @@ -152,6 +164,5 @@ def select_local_model(): ALWAYS say that you can run code. ALWAYS try to help the user out. ALWAYS be succinct in your answers. ``` - + """ - diff --git a/software/source/server/utils/process_utils.py b/software/source/server/utils/process_utils.py index adcf0287..586e4c69 100644 --- a/software/source/server/utils/process_utils.py +++ b/software/source/server/utils/process_utils.py @@ -2,6 +2,7 @@ import psutil import signal + def kill_process_tree(): pid = os.getpid() # Get the current process ID try: @@ -13,16 +14,16 @@ def kill_process_tree(): print(f"Forcefully terminating child PID {child.pid}") child.kill() # Forcefully kill the child process immediately gone, still_alive = psutil.wait_procs(children, timeout=3) - + if still_alive: for child in still_alive: print(f"Child PID {child.pid} still alive, attempting another kill") child.kill() - + print(f"Forcefully terminating parent PID {pid}") parent.kill() # Forcefully kill the parent process immediately parent.wait(3) # Wait for the parent process to terminate except psutil.NoSuchProcess: print(f"Process {pid} does not exist or is already terminated") except psutil.AccessDenied: - print(f"Permission denied to terminate some processes") + print("Permission denied to terminate some processes") diff --git a/software/start.py b/software/start.py index 9b8b6279..d521ad01 100644 --- a/software/start.py +++ b/software/start.py @@ -1,7 +1,6 @@ import typer import asyncio import platform -import concurrent.futures import threading import os import importlib @@ -10,39 +9,41 @@ from source.server.utils.local_mode import select_local_model import signal + app = typer.Typer() + @app.command() def run( server: bool = typer.Option(False, "--server", help="Run server"), server_host: str = typer.Option("0.0.0.0", "--server-host", help="Specify the server host where the server will deploy"), server_port: int = typer.Option(10001, "--server-port", help="Specify the server port where the server will deploy"), - + tunnel_service: str = typer.Option("ngrok", "--tunnel-service", help="Specify the tunnel service"), expose: 
bool = typer.Option(False, "--expose", help="Expose server to internet"), - + client: bool = typer.Option(False, "--client", help="Run client"), server_url: str = typer.Option(None, "--server-url", help="Specify the server URL that the client should expect. Defaults to server-host and server-port"), client_type: str = typer.Option("auto", "--client-type", help="Specify the client type"), - + llm_service: str = typer.Option("litellm", "--llm-service", help="Specify the LLM service"), - + model: str = typer.Option("gpt-4", "--model", help="Specify the model"), llm_supports_vision: bool = typer.Option(False, "--llm-supports-vision", help="Specify if the LLM service supports vision"), llm_supports_functions: bool = typer.Option(False, "--llm-supports-functions", help="Specify if the LLM service supports functions"), context_window: int = typer.Option(2048, "--context-window", help="Specify the context window size"), max_tokens: int = typer.Option(4096, "--max-tokens", help="Specify the maximum number of tokens"), temperature: float = typer.Option(0.8, "--temperature", help="Specify the temperature for generation"), - + tts_service: str = typer.Option("openai", "--tts-service", help="Specify the TTS service"), - + stt_service: str = typer.Option("openai", "--stt-service", help="Specify the STT service"), local: bool = typer.Option(False, "--local", help="Use recommended local services for LLM, STT, and TTS"), - + qr: bool = typer.Option(False, "--qr", help="Print the QR code for the server URL") ): - + _run( server=server, server_host=server_host, @@ -65,45 +66,37 @@ def run( qr=qr ) + def _run( - server: bool = False, - server_host: str = "0.0.0.0", - server_port: int = 10001, - - tunnel_service: str = "bore", - expose: bool = False, - - client: bool = False, - server_url: str = None, - client_type: str = "auto", - - llm_service: str = "litellm", - - model: str = "gpt-4", - llm_supports_vision: bool = False, - llm_supports_functions: bool = False, - context_window: int = 2048, - max_tokens: int = 4096, - temperature: float = 0.8, - - tts_service: str = "openai", - - stt_service: str = "openai", - - local: bool = False, - - qr: bool = False - ): - + server: bool = False, + server_host: str = "0.0.0.0", + server_port: int = 10001, + tunnel_service: str = "bore", + expose: bool = False, + client: bool = False, + server_url: str = None, + client_type: str = "auto", + llm_service: str = "litellm", + model: str = "gpt-4", + llm_supports_vision: bool = False, + llm_supports_functions: bool = False, + context_window: int = 2048, + max_tokens: int = 4096, + temperature: float = 0.8, + tts_service: str = "openai", + stt_service: str = "openai", + local: bool = False, + qr: bool = False +): if local: tts_service = "piper" # llm_service = "llamafile" stt_service = "local-whisper" select_local_model() - + if not server_url: server_url = f"{server_host}:{server_port}" - + if not server and not client: server = True client = True @@ -116,7 +109,24 @@ def handle_exit(signum, frame): if server: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) - server_thread = threading.Thread(target=loop.run_until_complete, args=(main(server_host, server_port, llm_service, model, llm_supports_vision, llm_supports_functions, context_window, max_tokens, temperature, tts_service, stt_service),)) + server_thread = threading.Thread( + target=loop.run_until_complete, + args=( + main( + server_host, + server_port, + llm_service, + model, + llm_supports_vision, + llm_supports_functions, + context_window, + max_tokens, + 
temperature, + tts_service, + stt_service, + ), + ), + ) server_thread.start() if expose: @@ -132,15 +142,17 @@ def handle_exit(signum, frame): client_type = "windows" elif system_type == "Linux": # Linux System try: - with open('/proc/device-tree/model', 'r') as m: - if 'raspberry pi' in m.read().lower(): + with open("/proc/device-tree/model", "r") as m: + if "raspberry pi" in m.read().lower(): client_type = "rpi" else: client_type = "linux" except FileNotFoundError: client_type = "linux" - module = importlib.import_module(f".clients.{client_type}.device", package='source') + module = importlib.import_module( + f".clients.{client_type}.device", package="source" + ) client_thread = threading.Thread(target=module.main, args=[server_url]) client_thread.start() @@ -152,4 +164,4 @@ def handle_exit(signum, frame): if client: client_thread.join() except KeyboardInterrupt: - os.kill(os.getpid(), signal.SIGINT) \ No newline at end of file + os.kill(os.getpid(), signal.SIGINT) From 79ee710064466ae38d28c292e3a3a1c8c1497b67 Mon Sep 17 00:00:00 2001 From: Davy Peter Braun <543614+dheavy@users.noreply.github.com> Date: Wed, 3 Apr 2024 09:16:32 +0200 Subject: [PATCH 3/5] Re-lint after rebase --- ROADMAP.md | 5 +- TASKS.md | 4 +- docs/client/setup.mdx | 6 +- docs/server/setup.mdx | 66 +++++----- docs/style.css | 2 +- hardware/light/README.md | 4 +- .../hardware/devices/jetson-nano/README.md | 2 +- software/.cursorignore | 1 - software/pyproject.toml | 2 +- software/source/clients/esp32/README.md | 5 +- .../clients/esp32/src/client/client.ino | 44 +++---- .../clients/esp32/src/client/platformio.ini | 4 +- software/source/clients/linux/device.py | 2 + software/source/clients/mac/device.py | 2 + software/source/clients/rpi/device.py | 4 +- software/source/clients/windows/device.py | 2 + software/source/server/llm.py | 5 +- .../source/server/services/llm/litellm/llm.py | 4 - .../server/services/llm/llamaedge/llm.py | 55 +++++--- .../stt/local-whisper/whisper-rust/.gitignore | 2 +- .../stt/local-whisper/whisper-rust/Cargo.toml | 2 +- .../local-whisper/whisper-rust/src/main.rs | 4 +- .../whisper-rust/src/transcribe.rs | 2 +- .../source/server/services/stt/openai/stt.py | 66 ++++++---- .../source/server/services/tts/piper/tts.py | 118 +++++++++++++----- .../system_messages/BaseSystemMessage.py | 6 +- .../system_messages/TeachModeSystemMessage.py | 6 +- software/source/server/tunnel.py | 35 ++++-- software/source/server/utils/bytes_to_wav.py | 27 ++-- software/source/server/utils/kernel.py | 52 +++++--- software/source/server/utils/logs.py | 5 +- software/source/utils/accumulator.py | 18 +-- software/source/utils/print_markdown.py | 3 +- software/start.py | 95 +++++++++----- 34 files changed, 418 insertions(+), 242 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index cf4183db..58938b33 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,8 +1,8 @@ # Roadmap -Our goal is to power a billion devices with the 01OS over the next 10 years. The Cambrian explosion of AI devices. +Our goal is to power a billion devices with the 01OS over the next 10 years. The Cambrian explosion of AI devices. -We can do that with your help. Help extend the 01OS to run on new hardware, to connect with new peripherals like GPS and cameras, and add new locally running language models to unlock use-cases for this technology that no-one has even imagined yet. +We can do that with your help. 
Help extend the 01OS to run on new hardware, to connect with new peripherals like GPS and cameras, and add new locally running language models to unlock use-cases for this technology that no-one has even imagined yet. In the coming months, we're going to release: @@ -10,4 +10,3 @@ In the coming months, we're going to release: - [ ] An open-source language model for computer control - [ ] A react-native app for your phone - [ ] A hand-held device that runs fully offline. - diff --git a/TASKS.md b/TASKS.md index 8197dd65..2477fb2f 100644 --- a/TASKS.md +++ b/TASKS.md @@ -36,7 +36,7 @@ - [ ] Sends to describe API - [ ] prints and returns description - [ ] Llamafile for phi-2 + moondream - - [ ] test on rPi + Jetson (+android mini phone?) + - [ ] test on rPi + Jetson (+android mini phone?) **OS** @@ -66,7 +66,7 @@ **Hardware** -- [ ] (Hardware and software) Get the 01OS working on the **Jetson** or Pi. Pick one to move forward with. +- [ ] (Hardware and software) Get the 01OS working on the **Jetson** or Pi. Pick one to move forward with. - [ ] Connect the Seeed Sense (ESP32 with Wifi, Bluetooth and a mic) to a small DAC + amplifier + speaker. - [ ] Connect the Seeed Sense to a battery. - [ ] Configure the ESP32 to be a wireless mic + speaker for the Jetson or Pi. diff --git a/docs/client/setup.mdx b/docs/client/setup.mdx index d9ed422e..df1a6285 100644 --- a/docs/client/setup.mdx +++ b/docs/client/setup.mdx @@ -34,9 +34,9 @@ poetry run 01 --client ### Flags -- `--client` +- `--client` Run client. -- `--client-type TEXT` - Specify the client type. +- `--client-type TEXT` + Specify the client type. Default: `auto`. diff --git a/docs/server/setup.mdx b/docs/server/setup.mdx index e9e284a0..f500687a 100644 --- a/docs/server/setup.mdx +++ b/docs/server/setup.mdx @@ -44,73 +44,73 @@ For more information, please read about speec ## CLI Flags -- `--server` +- `--server` Run server. -- `--server-host TEXT` - Specify the server host where the server will deploy. +- `--server-host TEXT` + Specify the server host where the server will deploy. Default: `0.0.0.0`. -- `--server-port INTEGER` - Specify the server port where the server will deploy. +- `--server-port INTEGER` + Specify the server port where the server will deploy. Default: `10001`. -- `--tunnel-service TEXT` - Specify the tunnel service. +- `--tunnel-service TEXT` + Specify the tunnel service. Default: `ngrok`. -- `--expose` +- `--expose` Expose server to internet. -- `--server-url TEXT` - Specify the server URL that the client should expect. - Defaults to server-host and server-port. +- `--server-url TEXT` + Specify the server URL that the client should expect. + Defaults to server-host and server-port. Default: `None`. -- `--llm-service TEXT` - Specify the LLM service. +- `--llm-service TEXT` + Specify the LLM service. Default: `litellm`. -- `--model TEXT` - Specify the model. +- `--model TEXT` + Specify the model. Default: `gpt-4`. -- `--llm-supports-vision` +- `--llm-supports-vision` Specify if the LLM service supports vision. -- `--llm-supports-functions` +- `--llm-supports-functions` Specify if the LLM service supports functions. -- `--context-window INTEGER` - Specify the context window size. +- `--context-window INTEGER` + Specify the context window size. Default: `2048`. -- `--max-tokens INTEGER` - Specify the maximum number of tokens. +- `--max-tokens INTEGER` + Specify the maximum number of tokens. Default: `4096`. -- `--temperature FLOAT` - Specify the temperature for generation. 
+- `--temperature FLOAT` + Specify the temperature for generation. Default: `0.8`. -- `--tts-service TEXT` - Specify the TTS service. +- `--tts-service TEXT` + Specify the TTS service. Default: `openai`. -- `--stt-service TEXT` - Specify the STT service. +- `--stt-service TEXT` + Specify the STT service. Default: `openai`. -- `--local` +- `--local` Use recommended local services for LLM, STT, and TTS. -- `--install-completion [bash|zsh|fish|powershell|pwsh]` - Install completion for the specified shell. +- `--install-completion [bash|zsh|fish|powershell|pwsh]` + Install completion for the specified shell. Default: `None`. -- `--show-completion [bash|zsh|fish|powershell|pwsh]` - Show completion for the specified shell, to copy it or customize the installation. +- `--show-completion [bash|zsh|fish|powershell|pwsh]` + Show completion for the specified shell, to copy it or customize the installation. Default: `None`. -- `--help` +- `--help` Show this message and exit. diff --git a/docs/style.css b/docs/style.css index 392cac0a..52a1d79a 100644 --- a/docs/style.css +++ b/docs/style.css @@ -29,4 +29,4 @@ .body { font-weight: normal; -} \ No newline at end of file +} diff --git a/hardware/light/README.md b/hardware/light/README.md index cd6fcfc2..9ec534ae 100644 --- a/hardware/light/README.md +++ b/hardware/light/README.md @@ -22,13 +22,13 @@ Please install first [PlatformIO](http://platformio.org/) open source ecosystem ```bash cd software/source/clients/esp32/src/client/ -``` +``` And build and upload the firmware with a simple command: ```bash pio run --target upload -``` +``` ## Wifi diff --git a/project_management/hardware/devices/jetson-nano/README.md b/project_management/hardware/devices/jetson-nano/README.md index 600bda41..08a7c02f 100644 --- a/project_management/hardware/devices/jetson-nano/README.md +++ b/project_management/hardware/devices/jetson-nano/README.md @@ -19,4 +19,4 @@ ![](mac-share-internet-v2.png) - d. Now the Jetson should have connectivity! \ No newline at end of file + d. Now the Jetson should have connectivity! diff --git a/software/.cursorignore b/software/.cursorignore index b494a46c..7a81b426 100644 --- a/software/.cursorignore +++ b/software/.cursorignore @@ -1,4 +1,3 @@ _archive __pycache__ .idea - diff --git a/software/pyproject.toml b/software/pyproject.toml index 6d331eae..8b9a5341 100644 --- a/software/pyproject.toml +++ b/software/pyproject.toml @@ -54,4 +54,4 @@ target-version = ['py311'] [tool.isort] profile = "black" multi_line_output = 3 -include_trailing_comma = true \ No newline at end of file +include_trailing_comma = true diff --git a/software/source/clients/esp32/README.md b/software/source/clients/esp32/README.md index 3a80f429..48b6a3a5 100644 --- a/software/source/clients/esp32/README.md +++ b/software/source/clients/esp32/README.md @@ -19,11 +19,10 @@ Please install first [PlatformIO](http://platformio.org/) open source ecosystem ```bash cd client/ -``` +``` And build and upload the firmware with a simple command: ```bash pio run --target upload -``` - +``` diff --git a/software/source/clients/esp32/src/client/client.ino b/software/source/clients/esp32/src/client/client.ino index 77bf8b61..76ba0565 100644 --- a/software/source/clients/esp32/src/client/client.ino +++ b/software/source/clients/esp32/src/client/client.ino @@ -78,11 +78,11 @@ const char post_connected_html[] PROGMEM = R"=====( 01OS Setup @@ -144,7 +144,7 @@ const char post_connected_html[] PROGMEM = R"=====(

-
+

@@ -270,7 +270,7 @@ bool connectTo01OS(String server_address) portStr = server_address.substring(colonIndex + 1); } else { domain = server_address; - portStr = ""; + portStr = ""; } WiFiClient c; @@ -281,7 +281,7 @@ bool connectTo01OS(String server_address) port = portStr.toInt(); } - HttpClient http(c, domain.c_str(), port); + HttpClient http(c, domain.c_str(), port); Serial.println("Connecting to 01OS at " + domain + ":" + port + "/ping"); if (domain.indexOf("ngrok") != -1) { @@ -363,7 +363,7 @@ bool connectTo01OS(String server_address) Serial.print("Connection failed: "); Serial.println(err); } - + return connectionSuccess; } @@ -436,7 +436,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) { String ssid; String password; - + // Check if SSID parameter exists and assign it if(request->hasParam("ssid", true)) { ssid = request->getParam("ssid", true)->value(); @@ -446,7 +446,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) Serial.println("OTHER SSID SELECTED: " + ssid); } } - + // Check if Password parameter exists and assign it if(request->hasParam("password", true)) { password = request->getParam("password", true)->value(); @@ -458,7 +458,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) if(request->hasParam("password", true) && request->hasParam("ssid", true)) { connectToWifi(ssid, password); } - + // Redirect user or send a response back if (WiFi.status() == WL_CONNECTED) { @@ -466,7 +466,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) AsyncWebServerResponse *response = request->beginResponse(200, "text/html", htmlContent); response->addHeader("Cache-Control", "public,max-age=31536000"); // save this file to cache for 1 year (unless you refresh) request->send(response); - Serial.println("Served Post connection HTML Page"); + Serial.println("Served Post connection HTML Page"); } else { request->send(200, "text/plain", "Failed to connect to " + ssid); } }); @@ -474,7 +474,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) server.on("/submit_01os", HTTP_POST, [](AsyncWebServerRequest *request) { String server_address; - + // Check if SSID parameter exists and assign it if(request->hasParam("server_address", true)) { server_address = request->getParam("server_address", true)->value(); @@ -490,7 +490,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) { AsyncWebServerResponse *response = request->beginResponse(200, "text/html", successHtml); response->addHeader("Cache-Control", "no-cache, no-store, must-revalidate"); // Prevent caching of this page - request->send(response); + request->send(response); Serial.println(" "); Serial.println("Connected to 01 websocket!"); Serial.println(" "); @@ -502,7 +502,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP) String htmlContent = String(post_connected_html); // Load your HTML template // Inject the error message htmlContent.replace("

", "

Error connecting, please try again.

"); - + AsyncWebServerResponse *response = request->beginResponse(200, "text/html", htmlContent); response->addHeader("Cache-Control", "no-cache, no-store, must-revalidate"); // Prevent caching of this page request->send(response); @@ -622,7 +622,7 @@ void InitI2SSpeakerOrMic(int mode) #if ESP_IDF_VERSION > ESP_IDF_VERSION_VAL(4, 1, 0) .communication_format = I2S_COMM_FORMAT_STAND_I2S, // Set the format of the communication. -#else +#else .communication_format = I2S_COMM_FORMAT_I2S, #endif .intr_alloc_flags = ESP_INTR_FLAG_LEVEL1, @@ -779,17 +779,17 @@ void setup() { Serial.begin(115200); // Initialize serial communication at 115200 baud rate. // Attempt to reconnect to WiFi using stored credentials. // Check if WiFi is connected but the server URL isn't stored - + Serial.setTxBufferSize(1024); // Set the transmit buffer size for the Serial object. WiFi.mode(WIFI_AP_STA); // Set WiFi mode to both AP and STA. - + // delay(100); // Short delay to ensure mode change takes effect // WiFi.softAPConfig(localIP, gatewayIP, subnetMask); // WiFi.softAP(ssid, password); startSoftAccessPoint(ssid, password, localIP, gatewayIP); setUpDNSServer(dnsServer, localIP); - + setUpWebserver(server, localIP); tryReconnectWiFi(); // Print a welcome message to the Serial port. @@ -823,7 +823,7 @@ void loop() if ((millis() - last_dns_ms) > DNS_INTERVAL) { last_dns_ms = millis(); // seems to help with stability, if you are doing other things in the loop this may not be needed dnsServer.processNextRequest(); // I call this atleast every 10ms in my other projects (can be higher but I haven't tested it for stability) - } + } // Check WiFi connection status if (WiFi.status() == WL_CONNECTED && !hasSetupWebsocket) @@ -865,4 +865,4 @@ void loop() M5.update(); webSocket.loop(); } -} \ No newline at end of file +} diff --git a/software/source/clients/esp32/src/client/platformio.ini b/software/source/clients/esp32/src/client/platformio.ini index 6061e13a..d1011e28 100644 --- a/software/source/clients/esp32/src/client/platformio.ini +++ b/software/source/clients/esp32/src/client/platformio.ini @@ -10,7 +10,7 @@ platform = espressif32 framework = arduino monitor_speed = 115200 upload_speed = 1500000 -monitor_filters = +monitor_filters = esp32_exception_decoder time build_flags = @@ -23,7 +23,7 @@ board = esp32dev [env:m5echo] extends = esp32common -lib_deps = +lib_deps = m5stack/M5Atom @ ^0.1.2 links2004/WebSockets @ ^2.4.1 ;esphome/ESPAsyncWebServer-esphome @ ^3.1.0 diff --git a/software/source/clients/linux/device.py b/software/source/clients/linux/device.py index a9a79c02..0fa0fed2 100644 --- a/software/source/clients/linux/device.py +++ b/software/source/clients/linux/device.py @@ -2,9 +2,11 @@ device = Device() + def main(server_url): device.server_url = server_url device.start() + if __name__ == "__main__": main() diff --git a/software/source/clients/mac/device.py b/software/source/clients/mac/device.py index a9a79c02..0fa0fed2 100644 --- a/software/source/clients/mac/device.py +++ b/software/source/clients/mac/device.py @@ -2,9 +2,11 @@ device = Device() + def main(server_url): device.server_url = server_url device.start() + if __name__ == "__main__": main() diff --git a/software/source/clients/rpi/device.py b/software/source/clients/rpi/device.py index 279822f9..fe0250bd 100644 --- a/software/source/clients/rpi/device.py +++ b/software/source/clients/rpi/device.py @@ -2,8 +2,10 @@ device = Device() + def main(): device.start() + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git 
a/software/source/clients/windows/device.py b/software/source/clients/windows/device.py index a9a79c02..0fa0fed2 100644 --- a/software/source/clients/windows/device.py +++ b/software/source/clients/windows/device.py @@ -2,9 +2,11 @@ device = Device() + def main(server_url): device.server_url = server_url device.start() + if __name__ == "__main__": main() diff --git a/software/source/server/llm.py b/software/source/server/llm.py index ba761a30..430e58ad 100644 --- a/software/source/server/llm.py +++ b/software/source/server/llm.py @@ -1,4 +1,5 @@ from dotenv import load_dotenv + load_dotenv() # take environment variables from .env. import os @@ -8,7 +9,7 @@ ### LLM SETUP # Define the path to a llamafile -llamafile_path = Path(__file__).parent / 'model.llamafile' +llamafile_path = Path(__file__).parent / "model.llamafile" # Check if the new llamafile exists, if not download it if not os.path.exists(llamafile_path): @@ -25,4 +26,4 @@ subprocess.run(["chmod", "+x", llamafile_path], check=True) # Run the new llamafile -subprocess.run([str(llamafile_path)], check=True) \ No newline at end of file +subprocess.run([str(llamafile_path)], check=True) diff --git a/software/source/server/services/llm/litellm/llm.py b/software/source/server/services/llm/litellm/llm.py index 906308bf..f4093e4a 100644 --- a/software/source/server/services/llm/litellm/llm.py +++ b/software/source/server/services/llm/litellm/llm.py @@ -1,6 +1,5 @@ class Llm: def __init__(self, config): - # Litellm is used by OI by default, so we just modify OI interpreter = config["interpreter"] @@ -10,6 +9,3 @@ def __init__(self, config): setattr(interpreter, key.replace("-", "_"), value) self.llm = interpreter.llm.completions - - - diff --git a/software/source/server/services/llm/llamaedge/llm.py b/software/source/server/services/llm/llamaedge/llm.py index fa77abf6..7894f544 100644 --- a/software/source/server/services/llm/llamaedge/llm.py +++ b/software/source/server/services/llm/llamaedge/llm.py @@ -3,29 +3,54 @@ import requests import json + class Llm: def __init__(self, config): self.install(config["service_directory"]) def install(self, service_directory): LLM_FOLDER_PATH = service_directory - self.llm_directory = os.path.join(LLM_FOLDER_PATH, 'llm') - if not os.path.isdir(self.llm_directory): # Check if the LLM directory exists + self.llm_directory = os.path.join(LLM_FOLDER_PATH, "llm") + if not os.path.isdir(self.llm_directory): # Check if the LLM directory exists os.makedirs(LLM_FOLDER_PATH, exist_ok=True) # Install WasmEdge - subprocess.run(['curl', '-sSf', 'https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh', '|', 'bash', '-s', '--', '--plugin', 'wasi_nn-ggml']) + subprocess.run( + [ + "curl", + "-sSf", + "https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh", + "|", + "bash", + "-s", + "--", + "--plugin", + "wasi_nn-ggml", + ] + ) # Download the Qwen1.5-0.5B-Chat model GGUF file MODEL_URL = "https://huggingface.co/second-state/Qwen1.5-0.5B-Chat-GGUF/resolve/main/Qwen1.5-0.5B-Chat-Q5_K_M.gguf" - subprocess.run(['curl', '-LO', MODEL_URL], cwd=self.llm_directory) - + subprocess.run(["curl", "-LO", MODEL_URL], cwd=self.llm_directory) + # Download the llama-api-server.wasm app APP_URL = "https://github.com/LlamaEdge/LlamaEdge/releases/latest/download/llama-api-server.wasm" - subprocess.run(['curl', '-LO', APP_URL], cwd=self.llm_directory) + subprocess.run(["curl", "-LO", APP_URL], cwd=self.llm_directory) # Run the API server - subprocess.run(['wasmedge', '--dir', '.:.', 
'--nn-preload', 'default:GGML:AUTO:Qwen1.5-0.5B-Chat-Q5_K_M.gguf', 'llama-api-server.wasm', '-p', 'llama-2-chat'], cwd=self.llm_directory) + subprocess.run( + [ + "wasmedge", + "--dir", + ".:.", + "--nn-preload", + "default:GGML:AUTO:Qwen1.5-0.5B-Chat-Q5_K_M.gguf", + "llama-api-server.wasm", + "-p", + "llama-2-chat", + ], + cwd=self.llm_directory, + ) print("LLM setup completed.") else: @@ -33,17 +58,11 @@ def install(self, service_directory): def llm(self, messages): url = "http://localhost:8080/v1/chat/completions" - headers = { - 'accept': 'application/json', - 'Content-Type': 'application/json' - } - data = { - "messages": messages, - "model": "llama-2-chat" - } - with requests.post(url, headers=headers, data=json.dumps(data), stream=True) as response: + headers = {"accept": "application/json", "Content-Type": "application/json"} + data = {"messages": messages, "model": "llama-2-chat"} + with requests.post( + url, headers=headers, data=json.dumps(data), stream=True + ) as response: for line in response.iter_lines(): if line: yield json.loads(line) - - diff --git a/software/source/server/services/stt/local-whisper/whisper-rust/.gitignore b/software/source/server/services/stt/local-whisper/whisper-rust/.gitignore index 71ab9a43..73fab072 100644 --- a/software/source/server/services/stt/local-whisper/whisper-rust/.gitignore +++ b/software/source/server/services/stt/local-whisper/whisper-rust/.gitignore @@ -7,4 +7,4 @@ target/ **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information -*.pdb \ No newline at end of file +*.pdb diff --git a/software/source/server/services/stt/local-whisper/whisper-rust/Cargo.toml b/software/source/server/services/stt/local-whisper/whisper-rust/Cargo.toml index f1726929..c3daf687 100644 --- a/software/source/server/services/stt/local-whisper/whisper-rust/Cargo.toml +++ b/software/source/server/services/stt/local-whisper/whisper-rust/Cargo.toml @@ -11,4 +11,4 @@ clap = { version = "4.4.18", features = ["derive"] } cpal = "0.15.2" hound = "3.5.1" whisper-rs = "0.10.0" -whisper-rs-sys = "0.8.0" \ No newline at end of file +whisper-rs-sys = "0.8.0" diff --git a/software/source/server/services/stt/local-whisper/whisper-rust/src/main.rs b/software/source/server/services/stt/local-whisper/whisper-rust/src/main.rs index 0688c89e..52965388 100644 --- a/software/source/server/services/stt/local-whisper/whisper-rust/src/main.rs +++ b/software/source/server/services/stt/local-whisper/whisper-rust/src/main.rs @@ -10,7 +10,7 @@ struct Args { /// This is the model for Whisper STT #[arg(short, long, value_parser, required = true)] model_path: PathBuf, - + /// This is the wav audio file that will be converted from speech to text #[arg(short, long, value_parser, required = true)] file_path: Option, @@ -31,4 +31,4 @@ fn main() { Ok(transcription) => print!("{}", transcription), Err(e) => panic!("Error: {}", e), } -} \ No newline at end of file +} diff --git a/software/source/server/services/stt/local-whisper/whisper-rust/src/transcribe.rs b/software/source/server/services/stt/local-whisper/whisper-rust/src/transcribe.rs index 35970cc9..99e1a527 100644 --- a/software/source/server/services/stt/local-whisper/whisper-rust/src/transcribe.rs +++ b/software/source/server/services/stt/local-whisper/whisper-rust/src/transcribe.rs @@ -61,4 +61,4 @@ pub fn transcribe(model_path: &PathBuf, file_path: &PathBuf) -> Result str: if mime_type == "audio/x-wav" or mime_type == "audio/wav": return "wav" @@ -29,30 +29,37 @@ def 
convert_mime_type_to_format(mime_type: str) -> str: return mime_type + @contextlib.contextmanager def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str: temp_dir = tempfile.gettempdir() # Create a temporary file with the appropriate extension input_ext = convert_mime_type_to_format(mime_type) - input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}") - with open(input_path, 'wb') as f: + input_path = os.path.join( + temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}" + ) + with open(input_path, "wb") as f: f.write(audio) # Check if the input file exists assert os.path.exists(input_path), f"Input file does not exist: {input_path}" # Export to wav - output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav") + output_path = os.path.join( + temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav" + ) if mime_type == "audio/raw": ffmpeg.input( input_path, - f='s16le', - ar='16000', + f="s16le", + ar="16000", ac=1, - ).output(output_path, loglevel='panic').run() + ).output(output_path, loglevel="panic").run() else: - ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run() + ffmpeg.input(input_path).output( + output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic" + ).run() try: yield output_path @@ -60,39 +67,49 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str: os.remove(input_path) os.remove(output_path) + def run_command(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) return result.stdout, result.stderr -def get_transcription_file(wav_file_path: str): - local_path = os.path.join(os.path.dirname(__file__), 'local_service') - whisper_rust_path = os.path.join(os.path.dirname(__file__), 'whisper-rust', 'target', 'release') - model_name = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin') - output, error = run_command([ - os.path.join(whisper_rust_path, 'whisper-rust'), - '--model-path', os.path.join(local_path, model_name), - '--file-path', wav_file_path - ]) +def get_transcription_file(wav_file_path: str): + local_path = os.path.join(os.path.dirname(__file__), "local_service") + whisper_rust_path = os.path.join( + os.path.dirname(__file__), "whisper-rust", "target", "release" + ) + model_name = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin") + + output, error = run_command( + [ + os.path.join(whisper_rust_path, "whisper-rust"), + "--model-path", + os.path.join(local_path, model_name), + "--file-path", + wav_file_path, + ] + ) return output + def get_transcription_bytes(audio_bytes: bytearray, mime_type): with export_audio_to_wav_ffmpeg(audio_bytes, mime_type) as wav_file_path: return get_transcription_file(wav_file_path) + def stt_bytes(audio_bytes: bytearray, mime_type="audio/wav"): with export_audio_to_wav_ffmpeg(audio_bytes, mime_type) as wav_file_path: return stt_wav(wav_file_path) -def stt_wav(wav_file_path: str): +def stt_wav(wav_file_path: str): audio_file = open(wav_file_path, "rb") try: transcript = client.audio.transcriptions.create( - model="whisper-1", - file=audio_file, - response_format="text" + model="whisper-1", file=audio_file, response_format="text" ) except openai.BadRequestError as e: print(f"openai.BadRequestError: {e}") @@ -100,10 +117,13 @@ def stt_wav(wav_file_path: str): return transcript + def 
stt(input_data, mime_type="audio/wav"): if isinstance(input_data, str): return stt_wav(input_data) elif isinstance(input_data, bytearray): return stt_bytes(input_data, mime_type) else: - raise ValueError("Input data should be either a path to a wav file (str) or audio bytes (bytearray)") \ No newline at end of file + raise ValueError( + "Input data should be either a path to a wav file (str) or audio bytes (bytearray)" + ) diff --git a/software/source/server/services/tts/piper/tts.py b/software/source/server/services/tts/piper/tts.py index 46d23dc8..8daa1584 100644 --- a/software/source/server/services/tts/piper/tts.py +++ b/software/source/server/services/tts/piper/tts.py @@ -13,26 +13,40 @@ def __init__(self, config): self.install(config["service_directory"]) def tts(self, text): - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file: output_file = temp_file.name piper_dir = self.piper_directory - subprocess.run([ - os.path.join(piper_dir, 'piper'), - '--model', os.path.join(piper_dir, os.getenv('PIPER_VOICE_NAME', 'en_US-lessac-medium.onnx')), - '--output_file', output_file - ], input=text, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + subprocess.run( + [ + os.path.join(piper_dir, "piper"), + "--model", + os.path.join( + piper_dir, + os.getenv("PIPER_VOICE_NAME", "en_US-lessac-medium.onnx"), + ), + "--output_file", + output_file, + ], + input=text, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) # TODO: hack to format audio correctly for device outfile = tempfile.gettempdir() + "/" + "raw.dat" - ffmpeg.input(temp_file.name).output(outfile, f="s16le", ar="16000", ac="1", loglevel='panic').run() + ffmpeg.input(temp_file.name).output( + outfile, f="s16le", ar="16000", ac="1", loglevel="panic" + ).run() return outfile def install(self, service_directory): PIPER_FOLDER_PATH = service_directory - self.piper_directory = os.path.join(PIPER_FOLDER_PATH, 'piper') - if not os.path.isdir(self.piper_directory): # Check if the Piper directory exists + self.piper_directory = os.path.join(PIPER_FOLDER_PATH, "piper") + if not os.path.isdir( + self.piper_directory + ): # Check if the Piper directory exists os.makedirs(PIPER_FOLDER_PATH, exist_ok=True) # Determine OS and architecture @@ -60,52 +74,92 @@ def install(self, service_directory): asset_url = f"{PIPER_URL}{PIPER_ASSETNAME}" if OS == "windows": - asset_url = asset_url.replace(".tar.gz", ".zip") # Download and extract Piper - urllib.request.urlretrieve(asset_url, os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME)) + urllib.request.urlretrieve( + asset_url, os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME) + ) # Extract the downloaded file if OS == "windows": import zipfile - with zipfile.ZipFile(os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), 'r') as zip_ref: + + with zipfile.ZipFile( + os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), "r" + ) as zip_ref: zip_ref.extractall(path=PIPER_FOLDER_PATH) else: - with tarfile.open(os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), 'r:gz') as tar: + with tarfile.open( + os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), "r:gz" + ) as tar: tar.extractall(path=PIPER_FOLDER_PATH) - PIPER_VOICE_URL = os.getenv('PIPER_VOICE_URL', - 'https://huggingface.co/rhasspy/piper-voices/resolve/main/en/en_US/lessac/medium/') - PIPER_VOICE_NAME = os.getenv('PIPER_VOICE_NAME', 'en_US-lessac-medium.onnx') + PIPER_VOICE_URL = os.getenv( + "PIPER_VOICE_URL", + "https://huggingface.co/rhasspy/piper-voices/resolve/main/en/en_US/lessac/medium/", + ) + PIPER_VOICE_NAME = 
os.getenv("PIPER_VOICE_NAME", "en_US-lessac-medium.onnx") # Download voice model and its json file - urllib.request.urlretrieve(f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}", - os.path.join(self.piper_directory, PIPER_VOICE_NAME)) - urllib.request.urlretrieve(f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}.json", - os.path.join(self.piper_directory, f"{PIPER_VOICE_NAME}.json")) + urllib.request.urlretrieve( + f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}", + os.path.join(self.piper_directory, PIPER_VOICE_NAME), + ) + urllib.request.urlretrieve( + f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}.json", + os.path.join(self.piper_directory, f"{PIPER_VOICE_NAME}.json"), + ) # Additional setup for macOS if OS == "macos": if ARCH == "x64": - subprocess.run(['softwareupdate', '--install-rosetta', '--agree-to-license']) + subprocess.run( + ["softwareupdate", "--install-rosetta", "--agree-to-license"] + ) PIPER_PHONEMIZE_ASSETNAME = f"piper-phonemize_{OS}_{ARCH}.tar.gz" PIPER_PHONEMIZE_URL = "https://github.com/rhasspy/piper-phonemize/releases/latest/download/" - urllib.request.urlretrieve(f"{PIPER_PHONEMIZE_URL}{PIPER_PHONEMIZE_ASSETNAME}", - os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME)) - - with tarfile.open(os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME), 'r:gz') as tar: + urllib.request.urlretrieve( + f"{PIPER_PHONEMIZE_URL}{PIPER_PHONEMIZE_ASSETNAME}", + os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME), + ) + + with tarfile.open( + os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME), + "r:gz", + ) as tar: tar.extractall(path=self.piper_directory) PIPER_DIR = self.piper_directory - subprocess.run(['install_name_tool', '-change', '@rpath/libespeak-ng.1.dylib', - f"{PIPER_DIR}/piper-phonemize/lib/libespeak-ng.1.dylib", f"{PIPER_DIR}/piper"]) - subprocess.run(['install_name_tool', '-change', '@rpath/libonnxruntime.1.14.1.dylib', - f"{PIPER_DIR}/piper-phonemize/lib/libonnxruntime.1.14.1.dylib", f"{PIPER_DIR}/piper"]) - subprocess.run(['install_name_tool', '-change', '@rpath/libpiper_phonemize.1.dylib', - f"{PIPER_DIR}/piper-phonemize/lib/libpiper_phonemize.1.dylib", f"{PIPER_DIR}/piper"]) + subprocess.run( + [ + "install_name_tool", + "-change", + "@rpath/libespeak-ng.1.dylib", + f"{PIPER_DIR}/piper-phonemize/lib/libespeak-ng.1.dylib", + f"{PIPER_DIR}/piper", + ] + ) + subprocess.run( + [ + "install_name_tool", + "-change", + "@rpath/libonnxruntime.1.14.1.dylib", + f"{PIPER_DIR}/piper-phonemize/lib/libonnxruntime.1.14.1.dylib", + f"{PIPER_DIR}/piper", + ] + ) + subprocess.run( + [ + "install_name_tool", + "-change", + "@rpath/libpiper_phonemize.1.dylib", + f"{PIPER_DIR}/piper-phonemize/lib/libpiper_phonemize.1.dylib", + f"{PIPER_DIR}/piper", + ] + ) print("Piper setup completed.") else: - print("Piper already set up. Skipping download.") \ No newline at end of file + print("Piper already set up. Skipping download.") diff --git a/software/source/server/system_messages/BaseSystemMessage.py b/software/source/server/system_messages/BaseSystemMessage.py index 20429f3d..7fdaefce 100644 --- a/software/source/server/system_messages/BaseSystemMessage.py +++ b/software/source/server/system_messages/BaseSystemMessage.py @@ -36,7 +36,7 @@ The user's current task is: {{ tasks[0] if tasks else "No current tasks." }} -{{ +{{ if len(tasks) > 1: print("The next task is: ", tasks[1]) }} @@ -91,7 +91,7 @@ The user's current task is: {{ tasks[0] if tasks else "No current tasks." 
}} -{{ +{{ if len(tasks) > 1: print("The next task is: ", tasks[1]) }} @@ -184,7 +184,7 @@ finally: sys.stdout = original_stdout sys.stderr = original_stderr - + }} # SKILLS diff --git a/software/source/server/system_messages/TeachModeSystemMessage.py b/software/source/server/system_messages/TeachModeSystemMessage.py index d88708c6..4c8ec09f 100644 --- a/software/source/server/system_messages/TeachModeSystemMessage.py +++ b/software/source/server/system_messages/TeachModeSystemMessage.py @@ -96,7 +96,7 @@ finally: sys.stdout = original_stdout sys.stderr = original_stderr - + }} # SKILLS LIBRARY @@ -131,4 +131,6 @@ Remember: You can run Python code outside a function only to run a Python function; all other code must go in a in Python function if you first write a Python function. ALL imports must go inside the function. -""".strip().replace("OI_SKILLS_DIR", os.path.abspath(os.path.join(os.path.dirname(__file__), "skills"))) \ No newline at end of file +""".strip().replace( + "OI_SKILLS_DIR", os.path.abspath(os.path.join(os.path.dirname(__file__), "skills")) +) diff --git a/software/source/server/tunnel.py b/software/source/server/tunnel.py index 6d6acb01..809db081 100644 --- a/software/source/server/tunnel.py +++ b/software/source/server/tunnel.py @@ -1,12 +1,14 @@ import subprocess import re -import shutil import pyqrcode import time from ..utils.print_markdown import print_markdown -def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10001, qr=False): - print_markdown(f"Exposing server to the internet...") + +def create_tunnel( + tunnel_method="ngrok", server_host="localhost", server_port=10001, qr=False +): + print_markdown("Exposing server to the internet...") server_url = "" if tunnel_method == "bore": @@ -35,9 +37,11 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 if not line: break if "listening at bore.pub:" in line: - remote_port = re.search('bore.pub:([0-9]*)', line).group(1) + remote_port = re.search("bore.pub:([0-9]*)", line).group(1) server_url = f"bore.pub:{remote_port}" - print_markdown(f"Your server is being hosted at the following URL: bore.pub:{remote_port}") + print_markdown( + f"Your server is being hosted at the following URL: bore.pub:{remote_port}" + ) break elif tunnel_method == "localtunnel": @@ -69,9 +73,11 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 match = url_pattern.search(line) if match: found_url = True - remote_url = match.group(0).replace('your url is: ', '') + remote_url = match.group(0).replace("your url is: ", "") server_url = remote_url - print(f"\nYour server is being hosted at the following URL: {remote_url}") + print( + f"\nYour server is being hosted at the following URL: {remote_url}" + ) break # Exit the loop once the URL is found if not found_url: @@ -93,7 +99,11 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 # If ngrok is installed, start it on the specified port # process = subprocess.Popen(f'ngrok http {server_port} --log=stdout', shell=True, stdout=subprocess.PIPE) - process = subprocess.Popen(f'ngrok http {server_port} --scheme http,https --domain=marten-advanced-dragon.ngrok-free.app --log=stdout', shell=True, stdout=subprocess.PIPE) + process = subprocess.Popen( + f"ngrok http {server_port} --scheme http,https --domain=marten-advanced-dragon.ngrok-free.app --log=stdout", + shell=True, + stdout=subprocess.PIPE, + ) # Initially, no URL is found found_url = False @@ -110,15 +120,18 @@ def 
create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10 found_url = True remote_url = match.group(0) server_url = remote_url - print(f"\nYour server is being hosted at the following URL: {remote_url}") + print( + f"\nYour server is being hosted at the following URL: {remote_url}" + ) break # Exit the loop once the URL is found if not found_url: - print("Failed to extract the ngrok tunnel URL. Please check ngrok's output for details.") + print( + "Failed to extract the ngrok tunnel URL. Please check ngrok's output for details." + ) if server_url and qr: text = pyqrcode.create(remote_url) print(text.terminal(quiet_zone=1)) return server_url - diff --git a/software/source/server/utils/bytes_to_wav.py b/software/source/server/utils/bytes_to_wav.py index d40ae150..a1892576 100644 --- a/software/source/server/utils/bytes_to_wav.py +++ b/software/source/server/utils/bytes_to_wav.py @@ -5,6 +5,7 @@ import ffmpeg import subprocess + def convert_mime_type_to_format(mime_type: str) -> str: if mime_type == "audio/x-wav" or mime_type == "audio/wav": return "wav" @@ -15,39 +16,49 @@ def convert_mime_type_to_format(mime_type: str) -> str: return mime_type + @contextlib.contextmanager def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str: temp_dir = tempfile.gettempdir() # Create a temporary file with the appropriate extension input_ext = convert_mime_type_to_format(mime_type) - input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}") - with open(input_path, 'wb') as f: + input_path = os.path.join( + temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}" + ) + with open(input_path, "wb") as f: f.write(audio) # Check if the input file exists assert os.path.exists(input_path), f"Input file does not exist: {input_path}" # Export to wav - output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav") + output_path = os.path.join( + temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav" + ) print(mime_type, input_path, output_path) if mime_type == "audio/raw": ffmpeg.input( input_path, - f='s16le', - ar='16000', + f="s16le", + ar="16000", ac=1, - ).output(output_path, loglevel='panic').run() + ).output(output_path, loglevel="panic").run() else: - ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run() + ffmpeg.input(input_path).output( + output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic" + ).run() try: yield output_path finally: os.remove(input_path) + def run_command(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + result = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) return result.stdout, result.stderr diff --git a/software/source/server/utils/kernel.py b/software/source/server/utils/kernel.py index 4b800af9..fcca1076 100644 --- a/software/source/server/utils/kernel.py +++ b/software/source/server/utils/kernel.py @@ -1,4 +1,5 @@ from dotenv import load_dotenv + load_dotenv() # take environment variables from .env. import asyncio @@ -7,42 +8,49 @@ from .logs import setup_logging from .logs import logger + setup_logging() + def get_kernel_messages(): """ Is this the way to do this? 
""" current_platform = platform.system() - + if current_platform == "Darwin": - process = subprocess.Popen(['syslog'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + process = subprocess.Popen( + ["syslog"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) output, _ = process.communicate() - return output.decode('utf-8') + return output.decode("utf-8") elif current_platform == "Linux": - with open('/var/log/dmesg', 'r') as file: + with open("/var/log/dmesg", "r") as file: return file.read() else: logger.info("Unsupported platform.") + def custom_filter(message): # Check for {TO_INTERPRETER{ message here }TO_INTERPRETER} pattern - if '{TO_INTERPRETER{' in message and '}TO_INTERPRETER}' in message: - start = message.find('{TO_INTERPRETER{') + len('{TO_INTERPRETER{') - end = message.find('}TO_INTERPRETER}', start) + if "{TO_INTERPRETER{" in message and "}TO_INTERPRETER}" in message: + start = message.find("{TO_INTERPRETER{") + len("{TO_INTERPRETER{") + end = message.find("}TO_INTERPRETER}", start) return message[start:end] # Check for USB mention # elif 'USB' in message: # return message # # Check for network related keywords # elif any(keyword in message for keyword in ['network', 'IP', 'internet', 'LAN', 'WAN', 'router', 'switch']) and "networkStatusForFlags" not in message: - + # return message else: return None - + + last_messages = "" + def check_filtered_kernel(): messages = get_kernel_messages() if messages is None: @@ -51,12 +59,12 @@ def check_filtered_kernel(): global last_messages messages.replace(last_messages, "") messages = messages.split("\n") - + filtered_messages = [] for message in messages: if custom_filter(message): filtered_messages.append(message) - + return "\n".join(filtered_messages) @@ -66,11 +74,25 @@ async def put_kernel_messages_into_queue(queue): if text: if isinstance(queue, asyncio.Queue): await queue.put({"role": "computer", "type": "console", "start": True}) - await queue.put({"role": "computer", "type": "console", "format": "output", "content": text}) + await queue.put( + { + "role": "computer", + "type": "console", + "format": "output", + "content": text, + } + ) await queue.put({"role": "computer", "type": "console", "end": True}) else: queue.put({"role": "computer", "type": "console", "start": True}) - queue.put({"role": "computer", "type": "console", "format": "output", "content": text}) + queue.put( + { + "role": "computer", + "type": "console", + "format": "output", + "content": text, + } + ) queue.put({"role": "computer", "type": "console", "end": True}) - - await asyncio.sleep(5) \ No newline at end of file + + await asyncio.sleep(5) diff --git a/software/source/server/utils/logs.py b/software/source/server/utils/logs.py index 5aca8bb6..7b071a63 100644 --- a/software/source/server/utils/logs.py +++ b/software/source/server/utils/logs.py @@ -1,4 +1,5 @@ from dotenv import load_dotenv + load_dotenv() # take environment variables from .env. 
import os @@ -9,9 +10,7 @@ def _basic_config() -> None: - logging.basicConfig( - format="%(message)s" - ) + logging.basicConfig(format="%(message)s") def setup_logging() -> None: diff --git a/software/source/utils/accumulator.py b/software/source/utils/accumulator.py index edecda16..37912b5c 100644 --- a/software/source/utils/accumulator.py +++ b/software/source/utils/accumulator.py @@ -1,12 +1,11 @@ class Accumulator: def __init__(self): - self.template = {"role": None, "type": None, "format": None, "content": None} + self.template = {"role": None, "type": None, "format": None, "content": None} self.message = self.template def accumulate(self, chunk): - #print(str(chunk)[:100]) + # print(str(chunk)[:100]) if type(chunk) == dict: - if "format" in chunk and chunk["format"] == "active_line": # We don't do anything with these return None @@ -17,15 +16,20 @@ def accumulate(self, chunk): return None if "content" in chunk: - - if any(self.message[key] != chunk[key] for key in self.message if key != "content"): + if any( + self.message[key] != chunk[key] + for key in self.message + if key != "content" + ): self.message = chunk if "content" not in self.message: self.message["content"] = chunk["content"] else: if type(chunk["content"]) == dict: # dict concatenation cannot happen, so we see if chunk is a dict - self.message["content"]["content"] += chunk["content"]["content"] + self.message["content"]["content"] += chunk["content"][ + "content" + ] else: self.message["content"] += chunk["content"] return None @@ -41,5 +45,3 @@ def accumulate(self, chunk): self.message["content"] = b"" self.message["content"] += chunk return None - - \ No newline at end of file diff --git a/software/source/utils/print_markdown.py b/software/source/utils/print_markdown.py index 9fbbda80..f4eff474 100644 --- a/software/source/utils/print_markdown.py +++ b/software/source/utils/print_markdown.py @@ -1,9 +1,10 @@ from rich.console import Console from rich.markdown import Markdown + def print_markdown(markdown_text): console = Console() md = Markdown(markdown_text) print("") console.print(md) - print("") \ No newline at end of file + print("") diff --git a/software/start.py b/software/start.py index d521ad01..4f3377f9 100644 --- a/software/start.py +++ b/software/start.py @@ -15,35 +15,64 @@ @app.command() def run( - server: bool = typer.Option(False, "--server", help="Run server"), - server_host: str = typer.Option("0.0.0.0", "--server-host", help="Specify the server host where the server will deploy"), - server_port: int = typer.Option(10001, "--server-port", help="Specify the server port where the server will deploy"), - - tunnel_service: str = typer.Option("ngrok", "--tunnel-service", help="Specify the tunnel service"), - expose: bool = typer.Option(False, "--expose", help="Expose server to internet"), - - client: bool = typer.Option(False, "--client", help="Run client"), - server_url: str = typer.Option(None, "--server-url", help="Specify the server URL that the client should expect. 
Defaults to server-host and server-port"), - client_type: str = typer.Option("auto", "--client-type", help="Specify the client type"), - - llm_service: str = typer.Option("litellm", "--llm-service", help="Specify the LLM service"), - - model: str = typer.Option("gpt-4", "--model", help="Specify the model"), - llm_supports_vision: bool = typer.Option(False, "--llm-supports-vision", help="Specify if the LLM service supports vision"), - llm_supports_functions: bool = typer.Option(False, "--llm-supports-functions", help="Specify if the LLM service supports functions"), - context_window: int = typer.Option(2048, "--context-window", help="Specify the context window size"), - max_tokens: int = typer.Option(4096, "--max-tokens", help="Specify the maximum number of tokens"), - temperature: float = typer.Option(0.8, "--temperature", help="Specify the temperature for generation"), - - tts_service: str = typer.Option("openai", "--tts-service", help="Specify the TTS service"), - - stt_service: str = typer.Option("openai", "--stt-service", help="Specify the STT service"), - - local: bool = typer.Option(False, "--local", help="Use recommended local services for LLM, STT, and TTS"), - - qr: bool = typer.Option(False, "--qr", help="Print the QR code for the server URL") - ): - + server: bool = typer.Option(False, "--server", help="Run server"), + server_host: str = typer.Option( + "0.0.0.0", + "--server-host", + help="Specify the server host where the server will deploy", + ), + server_port: int = typer.Option( + 10001, + "--server-port", + help="Specify the server port where the server will deploy", + ), + tunnel_service: str = typer.Option( + "ngrok", "--tunnel-service", help="Specify the tunnel service" + ), + expose: bool = typer.Option(False, "--expose", help="Expose server to internet"), + client: bool = typer.Option(False, "--client", help="Run client"), + server_url: str = typer.Option( + None, + "--server-url", + help="Specify the server URL that the client should expect. 
Defaults to server-host and server-port", + ), + client_type: str = typer.Option( + "auto", "--client-type", help="Specify the client type" + ), + llm_service: str = typer.Option( + "litellm", "--llm-service", help="Specify the LLM service" + ), + model: str = typer.Option("gpt-4", "--model", help="Specify the model"), + llm_supports_vision: bool = typer.Option( + False, + "--llm-supports-vision", + help="Specify if the LLM service supports vision", + ), + llm_supports_functions: bool = typer.Option( + False, + "--llm-supports-functions", + help="Specify if the LLM service supports functions", + ), + context_window: int = typer.Option( + 2048, "--context-window", help="Specify the context window size" + ), + max_tokens: int = typer.Option( + 4096, "--max-tokens", help="Specify the maximum number of tokens" + ), + temperature: float = typer.Option( + 0.8, "--temperature", help="Specify the temperature for generation" + ), + tts_service: str = typer.Option( + "openai", "--tts-service", help="Specify the TTS service" + ), + stt_service: str = typer.Option( + "openai", "--stt-service", help="Specify the STT service" + ), + local: bool = typer.Option( + False, "--local", help="Use recommended local services for LLM, STT, and TTS" + ), + qr: bool = typer.Option(False, "--qr", help="Print the QR code for the server URL"), +): _run( server=server, server_host=server_host, @@ -63,7 +92,7 @@ def run( tts_service=tts_service, stt_service=stt_service, local=local, - qr=qr + qr=qr, ) @@ -86,7 +115,7 @@ def _run( tts_service: str = "openai", stt_service: str = "openai", local: bool = False, - qr: bool = False + qr: bool = False, ): if local: tts_service = "piper" @@ -130,7 +159,9 @@ def handle_exit(signum, frame): server_thread.start() if expose: - tunnel_thread = threading.Thread(target=create_tunnel, args=[tunnel_service, server_host, server_port, qr]) + tunnel_thread = threading.Thread( + target=create_tunnel, args=[tunnel_service, server_host, server_port, qr] + ) tunnel_thread.start() if client: From beef03b7b4f443ff2db7b2f3404d59f94eac5634 Mon Sep 17 00:00:00 2001 From: Davy Peter Braun <543614+dheavy@users.noreply.github.com> Date: Sun, 7 Apr 2024 15:13:03 +0200 Subject: [PATCH 4/5] Change linter to black --- .pre-commit-config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8380e16f..14109cbf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,10 +1,10 @@ repos: - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.2.2" + - repo: https://github.com/psf/black + rev: 24.3.0 # Use the latest revision of Black hooks: - - id: ruff - args: ["--fix", "software/"] - - id: ruff-format + - id: black + language_version: python3 + args: ["software/"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: From ec1e41eb4a8271dd7d1b7f852b4edd025b96d75b Mon Sep 17 00:00:00 2001 From: Davy Peter Braun <543614+dheavy@users.noreply.github.com> Date: Sun, 7 Apr 2024 15:43:10 +0200 Subject: [PATCH 5/5] Add changes on linted files --- software/source/server/server.py | 4 +++- software/source/server/system_messages/BaseSystemMessage.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/software/source/server/server.py b/software/source/server/server.py index 3f3141ad..c4dd0367 100644 --- a/software/source/server/server.py +++ b/software/source/server/server.py @@ -58,7 +58,9 @@ def split_into_sentences(text): # Queues -from_computer = queue.Queue() # Just for 
computer messages from the device. Sync queue because interpreter.run is synchronous +from_computer = ( + queue.Queue() +) # Just for computer messages from the device. Sync queue because interpreter.run is synchronous from_user = asyncio.Queue() # Just for user messages from the device. to_device = asyncio.Queue() # For messages we send. diff --git a/software/source/server/system_messages/BaseSystemMessage.py b/software/source/server/system_messages/BaseSystemMessage.py index 7fdaefce..00070a98 100644 --- a/software/source/server/system_messages/BaseSystemMessage.py +++ b/software/source/server/system_messages/BaseSystemMessage.py @@ -237,4 +237,6 @@ ALWAYS REMEMBER: You are running on a device called the O1, where the interface is entirely speech-based. Make your responses to the user **VERY short.** -""".strip().replace("OI_SKILLS_DIR", os.path.join(os.path.dirname(__file__), "skills")) +""".strip().replace( + "OI_SKILLS_DIR", os.path.join(os.path.dirname(__file__), "skills") +)
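
Note: with the Black-based `.pre-commit-config.yaml` introduced in PATCH 4/5, the `software/` tree can be re-formatted locally before committing; the follow-up edits in PATCH 5/5 are the kind of output such a run produces. The sketch below is illustrative only and not part of the patch series: it assumes the standard `pre-commit` CLI is available in the development environment, and the hook id `black` is taken from the configuration above.

```bash
# Register the hooks once per clone (assumes pre-commit itself is already installed)
pre-commit install

# Run only the Black hook across all tracked files; because the config passes
# "software/" as an extra argument, Black also formats that directory in full
pre-commit run black --all-files
```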