diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000..0c9b18381 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,23 @@ +repo: + - '*' + +github: + - .github/**/* + +application: + - application/**/* + +docs: + - docs/**/* + +extensions: + - extensions/**/* + +frontend: + - frontend/**/* + +scripts: + - scripts/**/* + +tests: + - tests/**/* diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 000000000..f85abb125 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,15 @@ +# https://github.com/actions/labeler +name: Pull Request Labeler +on: + - pull_request_target +jobs: + triage: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: true diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ef6fe19d8..512b6c238 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,7 +2,7 @@ ## Our Pledge -We as members, contributors, and leaders pledge to make participation in our +We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, @@ -10,20 +10,20 @@ nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. +diverse, inclusive, and a healthy community. 
## Our Standards -Examples of behavior that contributes to a positive environment for our +Examples of behavior that contribute to a positive environment for our community include: -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences +* Demonstrating empathy and kindness towards other people +* Being respectful and open to differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience +* Taking accountability and offering apologies to those who have been impacted by our errors, + while also gaining insights from the situation * Focusing on what is best not just for us as individuals, but for the - overall community + community as a whole Examples of unacceptable behavior include: @@ -31,7 +31,7 @@ Examples of unacceptable behavior include: advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment -* Publishing others' private information, such as a physical or email +* Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting @@ -74,7 +74,7 @@ the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. +unprofessional or unwelcome in the community space. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the @@ -107,7 +107,7 @@ Violating these terms may lead to a permanent ban. 
**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. +individual, or aggression towards or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 78a3d9b1b..b759fd46f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Welcome to DocsGPT Contributing guideline +# Welcome to DocsGPT Contributing Guidelines -Thank you for choosing this project to contribute to, we are all very grateful! +Thank you for choosing this project to contribute to. We are all very grateful! ### [πŸŽ‰ Join the Hacktoberfest with DocsGPT and Earn a Free T-shirt! πŸŽ‰](https://github.com/arc53/DocsGPT/blob/main/HACKTOBERFEST.md) @@ -17,30 +17,36 @@ Thank you for choosing this project to contribute to, we are all very grateful! ## 🐞 Issues and Pull requests -We value contributions to our issues in the form of discussion or suggestion, we recommend that you check out existing issues and our [Roadmap](https://github.com/orgs/arc53/projects/2) +We value contributions to our issues in the form of discussion or suggestions. We recommend that you check out existing issues and our [roadmap](https://github.com/orgs/arc53/projects/2). -If you want to contribute by writing code there are a few things that you should know before doing it: -We have frontend (React, Vite) and Backend (python) +If you want to contribute by writing code, there are a few things that you should know before doing it: + +We have a frontend in React (Vite) and backend in Python. + +### If you are looking to contribute to frontend (βš›οΈReact, Vite): + +- The current frontend is being migrated from `/application` to `/frontend` with a new design, so please contribute to the new one. 
+- Check out this [milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues. +- The Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). -### If you are looking to contribute to Frontend (βš›οΈReact, Vite): -The current frontend is being migrated from /application to /frontend with a new design, so please contribute to the new one. Check out this [Milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues also [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1) Please try to follow the guidelines. -### If you are looking to contribute to Backend (🐍Python): -* Check out our issues, and contribute to /application or /scripts (ignore old ingest_rst.py ingest_rst_sphinx.py files, they will be deprecated soon) -* All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [/tests](https://github.com/arc53/DocsGPT/tree/main/tests) folder. -* Before submitting your PR make sure that after you ingested some test data it is queryable. +### If you are looking to contribute to Backend (🐍 Python): +- Check out our issues and contribute to `/application` or `/scripts` (ignore old `ingest_rst.py` `ingest_rst_sphinx.py` files; they will be deprecated soon). +- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder. +- Before submitting your PR, ensure it is queryable after ingesting some test data. ### Testing -To run unit tests, from the root of the repository execute: + +To run unit tests from the root of the repository, execute: ``` python -m pytest ``` ### Workflow: -Create a fork, make changes on your forked repository, and submit changes in the form of a pull request. 
+Create a fork, make changes on your forked repository, and submit changes as a pull request. ## Questions/collaboration -Please join our [Discord](https://discord.gg/n5BX8dh8rU) don't hesitate, we are very friendly and welcoming to new contributors. +Please join our [Discord](https://discord.gg/n5BX8dh8rU). Don't hesitate; we are very friendly and welcoming to new contributors. # Thank you so much for considering contributing to DocsGPT!πŸ™ diff --git a/HACKTOBERFEST.md b/HACKTOBERFEST.md index 7a0e016e1..1a39e561d 100644 --- a/HACKTOBERFEST.md +++ b/HACKTOBERFEST.md @@ -2,13 +2,13 @@ Welcome, contributors! We're excited to announce that DocsGPT is participating in Hacktoberfest. Get involved by submitting a **meaningful** pull request, and earn a free shirt in return! -All contributors with accepted PR's will receive a cool holopin! 🀩 (Watchout for a reply in your PR to collect it) +All contributors with accepted PRs will receive a cool Holopin! 🀩 (Watch out for a reply in your PR to collect it). πŸ“œ Here's How to Contribute: πŸ› οΈ Code: This is the golden ticket! Make meaningful contributions through PRs. πŸ“š Wiki: Improve our documentation, Create a guide or change existing documentation. - πŸ–₯️ Design: Improve the UI/UX, or design a new feature. + πŸ–₯️ Design: Improve the UI/UX or design a new feature. πŸ“ Guidelines for Pull Requests: @@ -16,20 +16,20 @@ Familiarize yourself with the current contributions and our [Roadmap](https://gi Deciding to contribute with code? Here are some insights based on the area of your interest: -Frontend (βš›οΈReact, Vite): - Most of the code is located in /frontend folder. You can also check out our React extension in /extensions/react-widget. - For design references, here's the [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). - Ensure you adhere to the established guidelines. +- Frontend (βš›οΈReact, Vite): + - Most of the code is located in `/frontend` folder. 
You can also check out our React extension in /extensions/react-widget. + - For design references, here's the [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). + - Ensure you adhere to the established guidelines. -Backend (🐍Python): - Focus on /application or /scripts. However, avoid the files ingest_rst.py and ingest_rst_sphinx.py as they are soon to be deprecated. - Newly added code should come with relevant unit tests (pytest). - Refer to the /tests folder for test suites. +- Backend (🐍Python): + - Focus on `/application` or `/scripts`. However, avoid the files ingest_rst.py and ingest_rst_sphinx.py, as they will soon be deprecated. + - Newly added code should come with relevant unit tests (pytest). + - Refer to the `/tests` folder for test suites. -Check out [Contributing Guidelines](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) +Check out our [Contributing Guidelines](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) -Once you have Created your PR and it was merged, please fill in this [form](https://airtable.com/appfkqFVjB0RpYCJh/shrXXM98xgRsbjO7s) +Once you have created your PR and our maintainers have merged it, please fill in this [form](https://airtable.com/appfkqFVjB0RpYCJh/shrXXM98xgRsbjO7s). -Don't be shy! Hop into our [Discord](https://discord.gg/n5BX8dh8rU) Server. We're a friendly bunch and eager to assist newcomers. +Feel free to join our Discord server. We're here to help newcomers, so don't hesitate to jump in! [Join us here](https://discord.gg/n5BX8dh8rU). -Big thanks for considering contributing to DocsGPT during Hacktoberfest! πŸ™ Your effort can earn you a swanky new t-shirt. 🎁 Let's code together! πŸš€ +Thank you very much for considering contributing to DocsGPT during Hacktoberfest! πŸ™ Your contributions could earn you a stylish new t-shirt as a token of our appreciation. 🎁 Join us, and let's code together! 
πŸš€ diff --git a/README.md b/README.md index ef892c51e..84bb2c1e6 100644 --- a/README.md +++ b/README.md @@ -18,15 +18,13 @@ Say goodbye to time-consuming manual searches, and let DocsGPT ![example2](https://img.shields.io/github/forks/arc53/docsgpt?style=social) ![example3](https://img.shields.io/github/license/arc53/docsgpt) ![example3](https://img.shields.io/discord/1070046503302877216) - - - + -### Production Support/ Help for companies: +### Production Support / Help for companies: -When deploying your DocsGPT to a live environment, we're eager to provide personalized assistance. -- [Schedule Demo πŸ‘‹](https://cal.com/arc53/docsgpt-demo-b2b?date=2023-10-04&month=2023-10) +We're eager to provide personalized assistance when deploying your DocsGPT to a live environment. +- [Get Support πŸ‘‹](https://airtable.com/appdeaL0F1qV8Bl2C/shrrJF1Ll7btCJRbP) - [Send Email βœ‰οΈ](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions) ### [πŸŽ‰ Join the Hacktoberfest with DocsGPT and Earn a Free T-shirt! πŸŽ‰](https://github.com/arc53/DocsGPT/blob/main/HACKTOBERFEST.md) @@ -36,9 +34,9 @@ When deploying your DocsGPT to a live environment, we're eager to provide person ## Roadmap -You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. Please don't hesitate to contribute or create issues, it helps us make DocsGPT better! +You can find our roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT! -## Our Open-Source models optimised for DocsGPT: +## Our Open-Source models optimized for DocsGPT: | Name | Base Model | Requirements (or similar) | |-------------------|------------|----------------------------------------------------------| @@ -47,7 +45,7 @@ You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here. 
Pleas | [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's | -If you don't have enough resources to run it you can use bitsnbytes to quantize +If you don't have enough resources to run it, you can use bitsnbytes to quantize. ## Features @@ -58,7 +56,7 @@ If you don't have enough resources to run it you can use bitsnbytes to quantize ## Useful links [Live preview](https://docsgpt.arc53.com/) - [Join Our Discord](https://discord.gg/n5BX8dh8rU) + [Join our Discord](https://discord.gg/n5BX8dh8rU) [Guides](https://docs.docsgpt.co.uk/) @@ -70,28 +68,28 @@ If you don't have enough resources to run it you can use bitsnbytes to quantize ## Project structure -- Application - Flask app (main application) +- Application - Flask app (main application). -- Extensions - Chrome extension +- Extensions - Chrome extension. -- Scripts - Script that creates similarity search index and store for other libraries. +- Scripts - Script that creates similarity search index and stores for other libraries. -- Frontend - Frontend uses Vite and React +- Frontend - Frontend uses Vite and React. ## QuickStart Note: Make sure you have Docker installed -On Mac OS or Linux just write: +On Mac OS or Linux, write: `./setup.sh` -It will install all the dependencies and give you an option to download local model or use OpenAI +It will install all the dependencies and allow you to download the local model or use OpenAI. -Otherwise refer to this Guide: +Otherwise, refer to this Guide: 1. Download and open this repository with `git clone https://github.com/arc53/DocsGPT.git` -2. Create a .env file in your root directory and set the env variable OPENAI_API_KEY with your OpenAI API key and VITE_API_STREAMING to true or false, depending on if you want streaming answers or not +2. 
Create a `.env` file in your root directory and set the env variable `OPENAI_API_KEY` with your OpenAI API key and `VITE_API_STREAMING` to true or false, depending on if you want streaming answers or not. It should look like this inside: ``` @@ -99,15 +97,15 @@ Otherwise refer to this Guide: VITE_API_STREAMING=true ``` See optional environment variables in the `/.env-template` and `/application/.env_sample` files. -3. Run `./run-with-docker-compose.sh` -4. Navigate to http://localhost:5173/ +3. Run `./run-with-docker-compose.sh`. +4. Navigate to http://localhost:5173/. -To stop just run Ctrl + C +To stop, just run `Ctrl + C`. ## Development environments ### Spin up mongo and redis -For development only 2 containers are used from docker-compose.yaml (by deleting all services except for Redis and Mongo). +For development, only two containers are used from `docker-compose.yaml` (by deleting all services except for Redis and Mongo). See file [docker-compose-dev.yaml](./docker-compose-dev.yaml). Run @@ -120,31 +118,31 @@ docker compose -f docker-compose-dev.yaml up -d Make sure you have Python 3.10 or 3.11 installed. -1. Export required environment variables or prep .env file in application folder -Prepare .env file -Copy `.env_sample` and create `.env` with your OpenAI API token for the API_KEY and EMBEDDINGS_KEY fields +1. Export required environment variables or prepare a `.env` file in the `/application` folder: + - Copy `.env_sample` and create `.env` with your OpenAI API token for the `API_KEY` and `EMBEDDINGS_KEY` fields. -(check out application/core/settings.py if you want to see more config options) -3. (optional) Create a Python virtual environment +(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.) + +2. (optional) Create a Python virtual environment: ```commandline python -m venv venv . venv/bin/activate ``` -4. Change to `application/` subdir and install dependencies for the backend +3. 
Change to the `application/` subdir and install dependencies for the backend: ```commandline pip install -r application/requirements.txt ``` -5. Run the app `flask run --host=0.0.0.0 --port=7091` -6. Start worker with `celery -A application.app.celery worker -l INFO` +4. Run the app using `flask run --host=0.0.0.0 --port=7091`. +5. Start worker with `celery -A application.app.celery worker -l INFO`. ### Start frontend + Make sure you have Node version 16 or higher. -1. Navigate to `/frontend` folder -2. Install dependencies -`npm install` -3. Run the app -`npm run dev` +1. Navigate to the `/frontend` folder. +2. Install dependencies by running `npm install`. +3. Run the app using `npm run dev`. + ## Contributing Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests. @@ -152,7 +150,7 @@ Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information abou ## Code Of Conduct We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing. -## All Thanks To Our Contributors +## Many Thanks To Our Contributors @@ -162,4 +160,3 @@ We as members, contributors, and leaders, pledge to make participation in our co The source code license is MIT, as described in the LICENSE file. 
Built with [πŸ¦œοΈπŸ”— LangChain](https://github.com/hwchase17/langchain) - diff --git a/application/app.py b/application/app.py index 41b821b7d..ae6199749 100644 --- a/application/app.py +++ b/application/app.py @@ -1,68 +1,44 @@ import platform - - import dotenv from application.celery import celery from flask import Flask, request, redirect - - from application.core.settings import settings from application.api.user.routes import user from application.api.answer.routes import answer from application.api.internal.routes import internal - - -# Redirect PosixPath to WindowsPath on Windows - if platform.system() == "Windows": import pathlib - - temp = pathlib.PosixPath pathlib.PosixPath = pathlib.WindowsPath -# loading the .env file dotenv.load_dotenv() - - app = Flask(__name__) app.register_blueprint(user) app.register_blueprint(answer) app.register_blueprint(internal) -app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER = "inputs" -app.config["CELERY_BROKER_URL"] = settings.CELERY_BROKER_URL -app.config["CELERY_RESULT_BACKEND"] = settings.CELERY_RESULT_BACKEND -app.config["MONGO_URI"] = settings.MONGO_URI +app.config.update( + UPLOAD_FOLDER="inputs", + CELERY_BROKER_URL=settings.CELERY_BROKER_URL, + CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND, + MONGO_URI=settings.MONGO_URI +) celery.config_from_object("application.celeryconfig") - - @app.route("/") def home(): - """ - The frontend source code lives in the /frontend directory of the repository. - """ if request.remote_addr in ('0.0.0.0', '127.0.0.1', 'localhost', '172.18.0.1'): - # If users locally try to access DocsGPT running in Docker, - # they will be redirected to the Frontend application. return redirect('http://localhost:5173') else: - # Handle other cases or render the default page return 'Welcome to DocsGPT Backend!' 
from application.llm.base import BaseLLM
from application.core.settings import settings
import json
import io


class LineIterator:
    """
    Parse a SageMaker response-stream into newline-delimited JSON lines.

    The endpoint emits output of the form:
        b'{"outputs": [" a"]}\n'
        b'{"outputs": [" challenging"]}\n'

    A single JSON object is not guaranteed to arrive in one PayloadPart
    event; it may be split across events, e.g.:
        {'PayloadPart': {'Bytes': b'{"outputs": '}}
        {'PayloadPart': {'Bytes': b'[" problem"]}\n'}}

    Incoming bytes are appended to an internal buffer and complete lines
    (terminated by '\n') are returned one at a time, without the trailing
    newline. The read position is tracked so previously returned bytes are
    never emitted twice.
    """

    def __init__(self, stream):
        self.byte_iterator = iter(stream)
        self.buffer = io.BytesIO()
        self.read_pos = 0

    def __iter__(self):
        return self

    def __next__(self):
        while True:
            self.buffer.seek(self.read_pos)
            line = self.buffer.readline()
            if line and line[-1] == ord('\n'):
                self.read_pos += len(line)
                return line[:-1]
            try:
                chunk = next(self.byte_iterator)
            except StopIteration:
                # Flush any trailing, newline-less bytes before stopping.
                if self.read_pos < self.buffer.getbuffer().nbytes:
                    continue
                raise
            if 'PayloadPart' not in chunk:
                # str(chunk): chunk is a dict, so bare concatenation with a
                # str would raise TypeError.
                print('Unknown event type: ' + str(chunk))
                continue
            self.buffer.seek(0, io.SEEK_END)
            self.buffer.write(chunk['PayloadPart']['Bytes'])


class SagemakerAPILLM(BaseLLM):
    """LLM backend that calls a SageMaker inference endpoint."""

    def __init__(self, *args, **kwargs):
        # boto3 is imported lazily so the module can be loaded without it.
        import boto3

        # Credentials/region come from settings (added alongside this class)
        # instead of hard-coded placeholders; when they are None, boto3
        # falls back to its default credential chain (env vars, profile,
        # instance role).
        self.runtime = boto3.client(
            'runtime.sagemaker',
            aws_access_key_id=settings.SAGEMAKER_ACCESS_KEY,
            aws_secret_access_key=settings.SAGEMAKER_SECRET_KEY,
            region_name=settings.SAGEMAKER_REGION,
        )
        self.endpoint = settings.SAGEMAKER_ENDPOINT

    def _build_payload(self, messages, stream, max_new_tokens):
        """Build (prompt, request_body_bytes) for the endpoint.

        messages[0] is treated as the context message and messages[-1] as
        the user question; everything in between is ignored.
        """
        context = messages[0]['content']
        user_question = messages[-1]['content']
        prompt = (f"### Instruction \n {user_question} "
                  f"\n ### Context \n {context} \n ### Answer \n")
        payload = {
            "inputs": prompt,
            "stream": stream,
            "parameters": {
                "do_sample": True,
                "temperature": 0.1,
                "max_new_tokens": max_new_tokens,
                "repetition_penalty": 1.03,
                # NOTE(review): the empty stop token looks like a stripped
                # "</s>" sentinel — confirm against the deployed model.
                "stop": ["", "###"]
            }
        }
        return prompt, json.dumps(payload).encode('utf-8')

    def gen(self, model, engine, messages, stream=False, **kwargs):
        """Run a non-streaming completion and return the generated answer."""
        # NOTE(review): max_new_tokens=30 is very low; kept as-is because
        # the unit tests pin this value — confirm before production use.
        prompt, body_bytes = self._build_payload(messages, stream=False,
                                                 max_new_tokens=30)
        response = self.runtime.invoke_endpoint(EndpointName=self.endpoint,
                                                ContentType='application/json',
                                                Body=body_bytes)
        result = json.loads(response['Body'].read().decode())
        # generated_text starts with the echoed prompt; return only the
        # completion that follows it.
        return result[0]['generated_text'][len(prompt):]

    def gen_stream(self, model, engine, messages, stream=True, **kwargs):
        """Yield generated tokens from the endpoint one at a time."""
        _, body_bytes = self._build_payload(messages, stream=True,
                                            max_new_tokens=512)
        response = self.runtime.invoke_endpoint_with_response_stream(
            EndpointName=self.endpoint,
            ContentType='application/json',
            Body=body_bytes)
        start_json = b'{'
        for line in LineIterator(response['Body']):
            if line != b'' and start_json in line:
                data = json.loads(line[line.find(start_json):].decode('utf-8'))
                if data['token']['text'] not in ["", "###"]:
                    yield data['token']['text']
+from langchain.vectorstores import FAISS from application.core.settings import settings class FaissStore(BaseVectorStore): diff --git a/docker-compose.yaml b/docker-compose.yaml index 7535a4b1f..84cc5681c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -19,7 +19,6 @@ services: - CELERY_BROKER_URL=redis://redis:6379/0 - CELERY_RESULT_BACKEND=redis://redis:6379/1 - MONGO_URI=mongodb://mongo:27017/docsgpt - - SELF_HOSTED_MODEL=$SELF_HOSTED_MODEL ports: - "7091:7091" volumes: diff --git a/docs/pages/Deploying/Quickstart.md b/docs/pages/Deploying/Quickstart.md index 7ffc96749..2cc03c5ac 100644 --- a/docs/pages/Deploying/Quickstart.md +++ b/docs/pages/Deploying/Quickstart.md @@ -10,7 +10,7 @@ It will install all the dependencies and give you an option to download the loca Otherwise, refer to this Guide: 1. Open and download this repository with `git clone https://github.com/arc53/DocsGPT.git`. -2. Create a `.env` file in your root directory and set your `API_KEY` with your openai api key. +2. Create a `.env` file in your root directory and set your `API_KEY` with your [OpenAI api key](https://platform.openai.com/account/api-keys). 3. Run `docker-compose build && docker-compose up`. 4. Navigate to `http://localhost:5173/`. 
diff --git a/docs/pages/Developing/API-docs.md b/docs/pages/Developing/API-docs.md index 1324f7a91..258387428 100644 --- a/docs/pages/Developing/API-docs.md +++ b/docs/pages/Developing/API-docs.md @@ -133,7 +133,7 @@ There are two types of responses: ``` ### /api/delete_old -Deletes old vecotstores: +Deletes old vectorstores: ```js // Task status (GET http://127.0.0.1:5000/api/docs_check) fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4fe2e7454d1", { diff --git a/docs/pages/Guides/Customising-prompts.md b/docs/pages/Guides/Customising-prompts.md index 3261b7daa..1d3a7d4aa 100644 --- a/docs/pages/Guides/Customising-prompts.md +++ b/docs/pages/Guides/Customising-prompts.md @@ -1,4 +1,4 @@ -## To customise a main prompt navigate to `/application/prompt/combine_prompt.txt` +## To customize a main prompt navigate to `/application/prompt/combine_prompt.txt` You can try editing it to see how the model responses. diff --git a/docs/pages/Guides/How-to-train-on-other-documentation.md b/docs/pages/Guides/How-to-train-on-other-documentation.md index 799fa9baa..c9549aed4 100644 --- a/docs/pages/Guides/How-to-train-on-other-documentation.md +++ b/docs/pages/Guides/How-to-train-on-other-documentation.md @@ -35,7 +35,7 @@ It will tell you how much it will cost Once you run it will use new context that is relevant to your documentation Make sure you select default in the dropdown in the UI -## Customisation +## Customization You can learn more about options while running ingest.py by running: `python ingest.py --help` diff --git a/frontend/src/Navigation.tsx b/frontend/src/Navigation.tsx index 366fade90..9507cfe1a 100644 --- a/frontend/src/Navigation.tsx +++ b/frontend/src/Navigation.tsx @@ -201,7 +201,9 @@ export default function Navigation() {

- {conversation.name} + {conversation.name.length > 45 + ? conversation.name.substring(0, 45) + '...' + : conversation.name}

@@ -227,11 +229,11 @@ export default function Navigation() {
setIsDocsListOpen(!isDocsListOpen)} > {selectedDocs && ( -

+

{selectedDocs.name} {selectedDocs.version}

import json
import unittest
from unittest.mock import MagicMock, patch

from application.llm.sagemaker import SagemakerAPILLM, LineIterator


class TestSagemakerAPILLM(unittest.TestCase):
    """Unit tests for SagemakerAPILLM.gen / gen_stream with a mocked runtime."""

    def setUp(self):
        self.sagemaker = SagemakerAPILLM()
        self.context = "This is the context"
        self.user_question = "What is the answer?"
        self.messages = [
            {"content": self.context},
            {"content": "Some other message"},
            {"content": self.user_question}
        ]
        self.prompt = f"### Instruction \n {self.user_question} \n ### Context \n {self.context} \n ### Answer \n"
        # Expected request bodies; must stay byte-identical to what the
        # module under test serializes.
        self.payload = {
            "inputs": self.prompt,
            "stream": False,
            "parameters": {
                "do_sample": True,
                "temperature": 0.1,
                "max_new_tokens": 30,
                "repetition_penalty": 1.03,
                "stop": ["", "###"]
            }
        }
        self.payload_stream = {
            "inputs": self.prompt,
            "stream": True,
            "parameters": {
                "do_sample": True,
                "temperature": 0.1,
                "max_new_tokens": 512,
                "repetition_penalty": 1.03,
                "stop": ["", "###"]
            }
        }
        self.body_bytes = json.dumps(self.payload).encode('utf-8')
        self.body_bytes_stream = json.dumps(self.payload_stream).encode('utf-8')
        self.response = {
            "Body": MagicMock()
        }
        self.result = [
            {
                "generated_text": "This is the generated text"
            }
        ]
        self.response['Body'].read.return_value.decode.return_value = json.dumps(self.result)

    def test_gen(self):
        """gen() posts the expected payload and strips the echoed prompt."""
        with patch.object(self.sagemaker.runtime, 'invoke_endpoint',
                          return_value=self.response) as mock_invoke_endpoint:
            output = self.sagemaker.gen(None, None, self.messages)
            mock_invoke_endpoint.assert_called_once_with(
                EndpointName=self.sagemaker.endpoint,
                ContentType='application/json',
                Body=self.body_bytes
            )
            self.assertEqual(output,
                             self.result[0]['generated_text'][len(self.prompt):])

    def test_gen_stream(self):
        """gen_stream() posts the streaming payload; a bare mock yields nothing."""
        with patch.object(self.sagemaker.runtime, 'invoke_endpoint_with_response_stream',
                          return_value=self.response) as mock_invoke_endpoint:
            output = list(self.sagemaker.gen_stream(None, None, self.messages))
            mock_invoke_endpoint.assert_called_once_with(
                EndpointName=self.sagemaker.endpoint,
                ContentType='application/json',
                Body=self.body_bytes_stream
            )
            self.assertEqual(output, [])


class TestLineIterator(unittest.TestCase):
    """Unit tests for the PayloadPart line-reassembly iterator."""

    def setUp(self):
        self.stream = [
            {'PayloadPart': {'Bytes': b'{"outputs": [" a"]}\n'}},
            {'PayloadPart': {'Bytes': b'{"outputs": [" challenging"]}\n'}},
            {'PayloadPart': {'Bytes': b'{"outputs": [" problem"]}\n'}}
        ]
        self.line_iterator = LineIterator(self.stream)

    def test_iter(self):
        self.assertEqual(iter(self.line_iterator), self.line_iterator)

    def test_next(self):
        self.assertEqual(next(self.line_iterator), b'{"outputs": [" a"]}')
        self.assertEqual(next(self.line_iterator), b'{"outputs": [" challenging"]}')
        self.assertEqual(next(self.line_iterator), b'{"outputs": [" problem"]}')


if __name__ == '__main__':
    unittest.main()