Skip to content

Commit

Permalink
Merge pull request #26 from inFocus7/add-linter-on-prs
Browse files Browse the repository at this point in the history
Add PyLint Workflow for PRs
  • Loading branch information
inFocus7 authored Jan 17, 2024
2 parents 7745218 + 04a622b commit 6703921
Show file tree
Hide file tree
Showing 20 changed files with 1,498 additions and 562 deletions.
22 changes: 22 additions & 0 deletions .github/workflows/pylint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Lint every tracked Python file with pylint.
name: Python linter
# The workflow was added specifically to lint pull requests (PR #26), so it
# must trigger on pull_request as well as push — a push-only trigger never
# runs for PRs opened from forks.
on: [push, pull_request]
jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Single version today; the matrix form makes it trivial to add more.
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          # Project dependencies must be importable for pylint to resolve them.
          pip install -r requirements.txt
          pip install pylint
      - name: Analysing the code with pylint
        run: |
          # Lint all Python files tracked by git (ignores untracked/venv files).
          pylint $(git ls-files '*.py')
10 changes: 10 additions & 0 deletions .pylintrc
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[MAIN]
# Permit lines up to 120 characters across the project.
max-line-length=120
# Allow classes to declare up to 10 instance attributes before
# too-many-instance-attributes fires.
max-attributes=10
# Allow functions/methods up to 20 local variables before
# too-many-locals fires.
max-locals=20
# Currently, this is added because gradio Inputs don't support passing tuples/dataclasses/etc. as arguments, meaning I
# can't shorten some methods that take a lot of arguments.
disable=too-many-arguments

[TYPECHECK]
# Suppress false no-member errors for attributes created dynamically at
# runtime by gradio dropdown/button components and OpenCV (cv2).
generated-members=gradio.components.dropdown.*,gradio.components.button.*,cv2.*
97 changes: 76 additions & 21 deletions api/chatgpt.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,25 @@
"""
This module provides utility functions for interacting with the OpenAI API and Gradio interfaces.
"""
import os
from typing import Optional
import openai
from openai import OpenAI
import os
import gradio as gr


# The actual gradio image name (+ orig_name) is <>.png, but the tmp file created and sent to
# batch is based on the portion after the last `/` in the url without the '%' (looks url encoded).
def url_to_gradio_image_name(url):
def url_to_gradio_image_name(url: str) -> str:
"""
Converts an OpenAI generated URL to a Gradio-compatible image name.
This function extracts the portion of the URL after the last forward slash ('/'). It removes special characters
often found in URLs such as '%', '&', and '='. The resulting string is truncated to a maximum length of 200
characters to prevent issues with file name length limitations.
:param url: The URL containing the image name.
:returns: A cleaned and truncated version of the image name suitable for use with Gradio.
"""

# Get the part after the final `/` in the URL
image_name = url.rsplit('/', 1)[-1]

Expand All @@ -21,7 +34,18 @@ def url_to_gradio_image_name(url):
return image_name


def get_openai_client(api_key):
def get_openai_client(api_key: Optional[str] = None) -> Optional[OpenAI]:
"""
Creates and returns an OpenAI client object configured with the given API key.
This function initializes an OpenAI client using the provided API key. If the provided API key is None or empty,
it attempts to retrieve the API key from the environment variable 'OPENAI_API_KEY'. If the environment variable is
also not set, it raises a warning and returns None.
:param api_key: The API key for OpenAI. If not provided, the function will try to use the API key from the
environment variable.
:returns: An instance of the OpenAI client configured with the API key, or None if no valid API key is provided.
"""
if api_key is None or api_key == "":
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None or api_key == "":
Expand All @@ -31,16 +55,33 @@ def get_openai_client(api_key):
return OpenAI(api_key=api_key)


def get_chat_response(client: openai.Client, api_model: str, role: str, prompt: str, context: list = None, as_json: bool= False):
def get_chat_response(client: openai.Client, api_model: str, role: str, prompt: str, context: Optional[list] = None,
as_json: bool = False) -> Optional[str]:
"""
Generates a chat response using the OpenAI API based on the provided parameters.
This function sends a message to the OpenAI API using the specified client and model. It constructs a message with
a role (system or user) and the provided prompt. It also optionally includes previous chat context. The response
can be returned in JSON format if specified.
:param client: The OpenAI client to use for making the API call.
:param api_model: The model to use for the chat completion (e.g., 'davinci-codex').
:param role: The role the AI should assume.
:param prompt: The message prompt to send to the chat model.
:param context: A list of previous chat messages to provide context. Default is None.
:param as_json: A flag to specify if the response should be in JSON format. Default is False.
:returns: The chat response as a string, or None if there was an error or no response generated.
"""
message = [
{"role": "system",
"content": role},
]

# Give the model previous chat context
if context is not None and len(context) > 0:
for c in context:
message.append(c)
for curr_context in context:
message.append(curr_context)

message.append({
"role": "user",
Expand All @@ -59,29 +100,43 @@ def get_chat_response(client: openai.Client, api_model: str, role: str, prompt:
messages=message,
)



response = response.choices[0]

if response.finish_reason != "stop":
if response.finish_reason == "length":
gr.Warning(
f"finish_reason: {response.finish_reason}. The maximum number of tokens specified in the request was reached.")
return None, None, None
elif response.finish_reason == "content_filter":
gr.Warning(
f"finish_reason: {response.finish_reason}. The content was omitted due to a flag from OpenAI's content filters.")
return None, None, None
match response.finish_reason:
case "length":
gr.Warning(
f"finish_reason: {response.finish_reason}. The maximum number of tokens specified in the request "
f"was reached.")
return None
case "content_filter":
gr.Warning(
f"finish_reason: {response.finish_reason}. The content was omitted due to a flag from OpenAI's "
f"content filters.")
return None

content = response.message.content
if content is None or content == "":
gr.Warning("No content was generated.")
return None, None
return None

return content


def get_image_response(client: openai.Client, api_model: str, prompt: str, portrait=False):
def get_image_response(client: openai.Client, api_model: str, prompt: str, portrait=False) -> Optional[str]:
"""
Generates an image response using the OpenAI API based on a given prompt and specified parameters.
This function requests the OpenAI API to generate an image based on the provided text prompt. It allows
specification of the model to use and whether the generated image should be in a portrait format. For 'dall-e-3'
model, it supports high-definition (HD) quality image generation.
:param client: The OpenAI client to use for making the API call.
:param api_model: The model to use for image generation (e.g., 'dall-e-3').
:param prompt: The text prompt based on which the image is generated.
:param portrait: A flag to specify if the generated image should be in portrait orientation. Default is False.
:returns: The URL of the generated image, or None if no image was generated or if there was an error.
"""
image_size = "1024x1024"
if portrait and api_model == "dall-e-3":
image_size = "1024x1792"
Expand Down
9 changes: 6 additions & 3 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
#!/usr/bin/env python3
"""
This is the main file for the web app. It launches the web app and initializes the font manager and inflect engine.
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*

import ui.ui as ui
import utils.font_manager as font_manager
from ui import ui
from utils import font_manager

if __name__ == '__main__':
# Initialize fonts, and svg file grabber at start
Expand Down
Loading

0 comments on commit 6703921

Please sign in to comment.