diff --git a/.github/workflows/pipeline-stg.yaml b/.github/workflows/pipeline-stg.yaml index c4ef132..dec2c52 100644 --- a/.github/workflows/pipeline-stg.yaml +++ b/.github/workflows/pipeline-stg.yaml @@ -49,4 +49,4 @@ jobs: push: true tags: ${{ steps.prep.outputs.tagged_image }},${{ steps.prep.outputs.image - }}:latest \ No newline at end of file + }}:latest diff --git a/.github/workflows/pipeline.yaml b/.github/workflows/pipeline.yaml index 63484bc..c844a9c 100644 --- a/.github/workflows/pipeline.yaml +++ b/.github/workflows/pipeline.yaml @@ -49,4 +49,4 @@ jobs: push: true tags: ${{ steps.prep.outputs.tagged_image }},${{ steps.prep.outputs.image - }}:latest \ No newline at end of file + }}:latest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..3e7cc75 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,27 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-docstring-first + - id: check-yaml + - id: debug-statements + # - repo: https://github.com/PyCQA/flake8 + # rev: 3.9.2 + # hooks: + # - id: flake8 + - repo: https://github.com/psf/black + rev: 22.10.0 + hooks: + - id: black + # - repo: https://github.com/asottile/reorder_python_imports + # rev: v2.5.0 + # hooks: + # - id: reorder-python-imports + # args: [--py39-plus] + - repo: https://github.com/asottile/add-trailing-comma + rev: v2.1.0 + hooks: + - id: add-trailing-comma + args: [--py36-plus] diff --git a/README.md b/README.md index 8265ed2..e239fb5 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,12 @@ # logos-svc +Install + +``` +rye sync +rye run pre-commit install +``` + Run `rye run uvicorn app.server:app --reload` @@ -24,4 +31,3 @@ Send a command to the server Tests `rye run pytest -s tests` - diff --git a/app/__init__.py b/app/__init__.py index c121879..bf6bd6c 100644 --- a/app/__init__.py +++ b/app/__init__.py @@ -1,2 +1,3 @@ from dotenv import load_dotenv -load_dotenv() \ No newline at end of file + +load_dotenv() diff --git a/app/animations/__init__.py b/app/animations/__init__.py index e1bafed..040a8e3 100644 --- a/app/animations/__init__.py +++ b/app/animations/__init__.py @@ -3,4 +3,4 @@ from .story import animated_story from .reel import animated_reel from .comic import illustrated_comic -from .little_martians import little_martian_poster \ No newline at end of file +from .little_martians import little_martian_poster diff --git a/app/animations/animation.py b/app/animations/animation.py index 45e4c99..7f0bb01 100644 --- a/app/animations/animation.py +++ b/app/animations/animation.py @@ -21,7 +21,7 @@ def select_random_voice(character: Character = None): prompt=prompt, model="gpt-3.5-turbo", params={"temperature": 0.0, "max_tokens": 10}, - output_schema=gender_schema + output_schema=gender_schema, ) try: @@ -35,22 +35,19 @@ def select_random_voice(character: Character = None): def talking_head( character: Character, - text: str, + text: str, width: Optional[int] = None, height: Optional[int] = None, gfpgan: bool = False, - gfpgan_upscale: int = 1 + gfpgan_upscale: int = 1, ) -> str: print("* talking head: {character.name} says {text}") if character.voice: voice_id = character.voice else: voice_id = select_random_voice(character) - - audio_bytes = elevenlabs.tts( - text, - voice=voice_id - ) + + audio_bytes = elevenlabs.tts(text, voice=voice_id) audio_url = s3.upload(audio_bytes, "mp3") @@ -64,7 +61,7 @@ def talking_head( width=width, height=height, ) - + 
print(f"output: {output_url}") return output_url, thumbnail_url @@ -75,7 +72,7 @@ def screenplay_clip( speech: str, image_text: str, width: Optional[int] = None, - height: Optional[int] = None + height: Optional[int] = None, ) -> str: if not character: voice_id = select_random_voice() @@ -84,10 +81,7 @@ def screenplay_clip( voice_id = character.voice else: voice_id = select_random_voice(character) - audio_bytes = elevenlabs.tts( - speech, - voice=voice_id - ) + audio_bytes = elevenlabs.tts(speech, voice=voice_id) audio_url = s3.upload(audio_bytes, "mp3") video_url, thumbnail_url = replicate.txt2vid( interpolation_texts=[image_text], @@ -106,20 +100,20 @@ def comic_strip( caption_padding_top: int = 10, line_spacing: int = 1.3, font_size: int = 48, - font_ttf: str = 'Raleway-Light.ttf' + font_ttf: str = "Raleway-Light.ttf", ): font = get_font(font_ttf, font_size) num_panels = len(images) caption_box_height = 3 * int(1.5 * font.size) - width, height = 1024, 1024 #images[0].size + width, height = 1024, 1024 # images[0].size total_width = width * 2 + margin total_height = height * 2 + caption_box_height * 2 + margin - composite_image = Image.new('RGB', (total_width, total_height), color='white') + composite_image = Image.new("RGB", (total_width, total_height), color="white") draw = ImageDraw.Draw(composite_image) - draw.rectangle([(0, 0), (total_width, total_height)], fill='black') + draw.rectangle([(0, 0), (total_width, total_height)], fill="black") caption_box_height = 3 * int(1.5 * font.size) + 2 * caption_padding_top @@ -133,12 +127,24 @@ def comic_strip( else: if num_panels == 3: x = width + margin - y = ((i - 1) * (height + caption_box_height + margin)) if i == 1 else ((i - 1) * (height + caption_box_height)) + y = ( + ((i - 1) * (height + caption_box_height + margin)) + if i == 1 + else ((i - 1) * (height + caption_box_height)) + ) new_height = height new_width = width else: - x = (i % 2) * (width + margin) if i % 2 == 0 else (i % 2) * width + margin - y = (i // 2) * (height + caption_box_height) if i // 2 == 0 else (i // 2) * (height + caption_box_height + margin) + x = ( + (i % 2) * (width + margin) + if i % 2 == 0 + else (i % 2) * width + margin + ) + y = ( + (i // 2) * (height + caption_box_height) + if i // 2 == 0 + else (i // 2) * (height + caption_box_height + margin) + ) new_height = height new_width = width @@ -146,19 +152,23 @@ def comic_strip( composite_image.paste(resized_image, (x, y)) - caption_box = Image.new('RGB', (new_width, caption_box_height), color='black') + caption_box = Image.new("RGB", (new_width, caption_box_height), color="black") draw = ImageDraw.Draw(caption_box) wrapped_caption = wrap_text(draw, captions[i], font, new_width - 2 * padding) caption_y = caption_padding_top for line in wrapped_caption: - draw.text((padding, caption_y), line, fill='white', font=font) + draw.text((padding, caption_y), line, fill="white", font=font) caption_y += int(line_spacing * font.size) composite_image.paste(caption_box, (x, y + new_height)) if (num_panels == 4 and i == 0) or (num_panels == 3 and i == 1): - thumbnail = Image.new('RGB', (new_width, new_height + caption_box_height), color='white') + thumbnail = Image.new( + "RGB", + (new_width, new_height + caption_box_height), + color="white", + ) thumbnail.paste(resized_image, (0, 0)) thumbnail.paste(caption_box, (0, new_height)) @@ -172,46 +182,53 @@ def poster( caption_padding_top: int = 10, line_spacing: int = 1.3, font_size: int = 36, - font_ttf: str = 'Raleway-Light.ttf', + font_ttf: str = "Raleway-Light.ttf", 
shadow_offset: tuple = (1, 1.4), - font_color: str = '#e7e7e7', - shadow_color: str = '#d3d3d3' + font_color: str = "#e7e7e7", + shadow_color: str = "#d3d3d3", ): font = get_font(font_ttf, font_size) width, height = image.size - draw = ImageDraw.Draw(Image.new('RGB', (width, height), (0, 0, 0))) - caption = caption.replace('\n', ' ') + draw = ImageDraw.Draw(Image.new("RGB", (width, height), (0, 0, 0))) + caption = caption.replace("\n", " ") wrapped_caption = wrap_text(draw, caption, font, width - 2 * margin) num_lines = len(wrapped_caption) - caption_box_height = num_lines * int(line_spacing * font.size) + 2 * caption_padding_top + caption_box_height = ( + num_lines * int(line_spacing * font.size) + 2 * caption_padding_top + ) total_width = width + margin total_height = height + caption_box_height + margin - composite_image = Image.new('RGB', (total_width, total_height), color='white') + composite_image = Image.new("RGB", (total_width, total_height), color="white") draw = ImageDraw.Draw(composite_image) - draw.rectangle([(0, 0), (total_width, total_height)], fill='black') + draw.rectangle([(0, 0), (total_width, total_height)], fill="black") resized_image = image.resize((width, height)) - composite_image.paste(resized_image, (int(margin/2), int(margin/2))) + composite_image.paste(resized_image, (int(margin / 2), int(margin / 2))) - caption_box = Image.new('RGB', (total_width, caption_box_height), color='black') + caption_box = Image.new("RGB", (total_width, caption_box_height), color="black") draw = ImageDraw.Draw(caption_box) caption_y = caption_padding_top for line in wrapped_caption: - draw.text((margin + shadow_offset[0], caption_y + shadow_offset[1]), line, fill=shadow_color, font=font) + draw.text( + (margin + shadow_offset[0], caption_y + shadow_offset[1]), + line, + fill=shadow_color, + font=font, + ) draw.text((margin, caption_y), line, fill=font_color, font=font) caption_y += int(line_spacing * font.size) composite_image.paste(caption_box, (0, height)) - thumbnail = Image.new('RGB', (width, height + caption_box_height), color='white') + thumbnail = Image.new("RGB", (width, height + caption_box_height), color="white") thumbnail.paste(resized_image, (0, 0)) thumbnail.paste(caption_box, (0, height)) - return composite_image, thumbnail \ No newline at end of file + return composite_image, thumbnail diff --git a/app/animations/comic.py b/app/animations/comic.py index 9ebeac6..592633e 100644 --- a/app/animations/comic.py +++ b/app/animations/comic.py @@ -14,9 +14,9 @@ def illustrated_comic(request: ComicRequest): params = {"temperature": 1.0, "max_tokens": 2000, **request.params} loras = { - "Verdelis": "https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/f290723c93715a8eb14e589ca1eec211e10691f683d53cde37139bc7d3a91c22.tar" + "Verdelis": "https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/f290723c93715a8eb14e589ca1eec211e10691f683d53cde37139bc7d3a91c22.tar", } - + comicwriter = LLM( model=request.model, system_message=comicwriter_system_template.template, @@ -24,31 +24,29 @@ def illustrated_comic(request: ComicRequest): ) comic_book = comicwriter(request.prompt, output_schema=ComicResult) - + def run_panel(panel, idx): # pick lora of character # pick init image character x genre - return replicate.sdxl({ - "text_input": panel['image'], - "lora": loras["Verdelis"], - "width": 512 if idx == 0 else 1024, - "height": 1024, - "n_samples": 1, - }) - - results = utils.process_in_parallel( - comic_book['panels'], - run_panel, - max_workers=4 - ) + return replicate.sdxl( + { + 
"text_input": panel["image"], + "lora": loras["Verdelis"], + "width": 512 if idx == 0 else 1024, + "height": 1024, + "n_samples": 1, + }, + ) + + results = utils.process_in_parallel(comic_book["panels"], run_panel, max_workers=4) image_urls = [image_url for image_url, thumbnail in results] images = [utils.download_image(url) for url in image_urls] - captions = [panel['caption'] for panel in comic_book['panels']] + captions = [panel["caption"] for panel in comic_book["panels"]] composite_image, thumbnail_image = comic_strip(images, captions) - + img_bytes = utils.PIL_to_bytes(composite_image, ext="JPEG") thumbnail_bytes = utils.PIL_to_bytes(thumbnail_image, ext="WEBP") diff --git a/app/animations/dialogue.py b/app/animations/dialogue.py index 5a35d43..bab3175 100644 --- a/app/animations/dialogue.py +++ b/app/animations/dialogue.py @@ -25,16 +25,13 @@ def animated_dialogue(request: DialogueRequest, callback=None): if callback: callback(progress=0.1) - + characters = { - character_id: EdenCharacter(character_id) + character_id: EdenCharacter(character_id) for character_id in request.character_ids } - images = [ - characters[character_id].image - for character_id in request.character_ids - ] - + images = [characters[character_id].image for character_id in request.character_ids] + width, height = utils.calculate_target_dimensions(list(set(images)), MAX_PIXELS) progress = 0.1 @@ -45,13 +42,13 @@ def run_talking_head_segment(message, idx): character = characters[message["character_id"]] print(f'run talking head: {message["message"]}') output, _ = talking_head( - character, - message["message"], - width, + character, + message["message"], + width, height, - gfpgan=request.gfpgan + gfpgan=request.gfpgan, ) - print(f'output: {output}') + print(f"output: {output}") with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file: print("download:", output) response = requests.get(output, stream=True) @@ -70,7 +67,7 @@ def run_talking_head_segment(message, idx): video_files = utils.process_in_parallel( result.dialogue, run_talking_head_segment, - max_workers=MAX_WORKERS + max_workers=MAX_WORKERS, ) print("--- end video file tasks ----") @@ -78,7 +75,7 @@ def run_talking_head_segment(message, idx): if request.dual_view: print(" -> dual view") - cropped_images = {} + cropped_images = {} for character in characters: temp_file = tempfile.NamedTemporaryFile(suffix=".webp", delete=False) image = utils.download_image(characters[character].image) @@ -86,9 +83,10 @@ def run_talking_head_segment(message, idx): image.save(temp_file, format="png") cropped_images[character] = temp_file.name dual_video_files = [] - for idx, (message, video_file) in enumerate(zip(result.dialogue, video_files)): + for idx, (message, video_file) in enumerate(zip(result.dialogue, video_files)): opposing_character_id = next( - c for c in characters + c + for c in characters if characters[c].character_id != message["character_id"] ) image = cropped_images[opposing_character_id] @@ -103,20 +101,19 @@ def run_talking_head_segment(message, idx): if request.intro_screen: print(" -> intro screen") - character_names = [characters[character_id].name for character_id in request.character_ids] - character_name_str = " and ".join(character_names) - paragraphs = [ - request.prompt, - f"Dialogue between {character_name_str}" + character_names = [ + characters[character_id].name for character_id in request.character_ids ] + character_name_str = " and ".join(character_names) + paragraphs = [request.prompt, f"Dialogue between 
{character_name_str}"] intro_screen = utils.video_textbox( - paragraphs, - width * 2 if request.dual_view else width, - height, - duration = 8, - fade_in = 1.5, - margin_left = 25, - margin_right = 25 #width + 25 + paragraphs, + width * 2 if request.dual_view else width, + height, + duration=8, + fade_in=1.5, + margin_left=25, + margin_right=25, # width + 25 ) video_files = [intro_screen] + video_files @@ -126,7 +123,7 @@ def run_talking_head_segment(message, idx): with tempfile.NamedTemporaryFile(delete=True, suffix=".mp4") as temp_output_file: print("concatenate videos") utils.concatenate_videos(video_files, temp_output_file.name) - with open(temp_output_file.name, 'rb') as f: + with open(temp_output_file.name, "rb") as f: video_bytes = f.read() output_url = s3.upload(video_bytes, "mp4") @@ -136,11 +133,11 @@ def run_talking_head_segment(message, idx): # generate thumbnail print("make thumbnail") - thumbnail = utils.create_dialogue_thumbnail(*images, 2*width, height) + thumbnail = utils.create_dialogue_thumbnail(*images, 2 * width, height) thumbnail_url = s3.upload(thumbnail, "webp") print("finished thumbnail", thumbnail_url) if callback: callback(progress=0.99) - return output_url, thumbnail_url \ No newline at end of file + return output_url, thumbnail_url diff --git a/app/animations/little_martians.py b/app/animations/little_martians.py index f9385a4..cb9234d 100644 --- a/app/animations/little_martians.py +++ b/app/animations/little_martians.py @@ -10,25 +10,29 @@ from ..models import LittleMartianRequest, Poster from .animation import poster from ..prompt_templates.little_martians import ( - littlemartians_poster_system, - littlemartians_poster_prompt, - littlemartians_data + littlemartians_poster_system, + littlemartians_poster_prompt, + littlemartians_data, ) + def random_interval(min, max): return random.random() * (max - min) + min + def little_martian_poster(request: LittleMartianRequest, callback=None): params = {"temperature": 1.0, "max_tokens": 2000, **request.params} - data = littlemartians_data[request.martian.value][request.setting.value][request.genre.value] + data = littlemartians_data[request.martian.value][request.setting.value][ + request.genre.value + ] - lora = data['lora'] - character_id = data['character_id'] - modifier = data['modifier'] - lora_scale = random_interval(*data['lora_scale']) - init_image = random.choice(data['init_images']) - init_image_strength = random_interval(*data['init_image_strength']) + lora = data["lora"] + character_id = data["character_id"] + modifier = data["modifier"] + lora_scale = random_interval(*data["lora_scale"]) + init_image = random.choice(data["init_images"]) + init_image_strength = random_interval(*data["init_image_strength"]) character = EdenCharacter(character_id) @@ -37,20 +41,20 @@ def little_martian_poster(request: LittleMartianRequest, callback=None): system_message=littlemartians_poster_system.template, params=params, ) - + prompt = littlemartians_poster_prompt.substitute( - martian = request.martian.value, - identity = character.identity, - setting = request.setting.value, - genre = request.genre.value, - premise = request.prompt, + martian=request.martian.value, + identity=character.identity, + setting=request.setting.value, + genre=request.genre.value, + premise=request.prompt, ) - + result = littlemartian_writer(prompt, output_schema=Poster) - prompt = result['image'] - - text_input = f'{modifier}, {prompt}' + prompt = result["image"] + + text_input = f"{modifier}, {prompt}" if request.aspect_ratio == "portrait": 
width, height = 1280, 1920 @@ -63,7 +67,7 @@ def little_martian_poster(request: LittleMartianRequest, callback=None): "text_input": text_input, "lora": lora, "lora_scale": lora_scale, - "init_image": f'https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/{init_image}', + "init_image": f"https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/{init_image}", "init_image_strength": init_image_strength, "width": width, "height": height, @@ -73,7 +77,7 @@ def little_martian_poster(request: LittleMartianRequest, callback=None): image_url, thumbnail_url = replicate.sdxl(config) - caption = result['caption'] + caption = result["caption"] print(caption) image = utils.download_image(image_url) diff --git a/app/animations/monologue.py b/app/animations/monologue.py index 06ef6ab..e47de31 100644 --- a/app/animations/monologue.py +++ b/app/animations/monologue.py @@ -12,37 +12,35 @@ def animated_monologue(request: MonologueRequest, callback=None): result = monologue(request) - + if callback: callback(progress=0.2) - + character = EdenCharacter(request.character_id) - + width, height = calculate_target_dimensions([character.image], MAX_PIXELS) output, thumbnail_url = talking_head( - character, - result.monologue, - width, + character, + result.monologue, + width, height, - gfpgan=request.gfpgan + gfpgan=request.gfpgan, ) if request.intro_screen: - #image = download_image(character.image) - #width, height = image.size + # image = download_image(character.image) + # width, height = image.size - text = [ - f"{character.name}: {request.prompt}" - ] + text = [f"{character.name}: {request.prompt}"] intro_screen = video_textbox( - text, - width, - height, - duration = 8, - fade_in = 1.5, - margin_left = 25, - margin_right = 25 + text, + width, + height, + duration=8, + fade_in=1.5, + margin_left=25, + margin_right=25, ) with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file: @@ -51,20 +49,23 @@ def animated_monologue(request: MonologueRequest, callback=None): for chunk in response.iter_content(chunk_size=8192): temp_file.write(chunk) temp_file.flush() - + video_files = [intro_screen, temp_file.name] print(video_files) print("cat") - with tempfile.NamedTemporaryFile(delete=True, suffix=".mp4") as temp_output_file: + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".mp4", + ) as temp_output_file: print(video_files, temp_output_file.name) concatenate_videos(video_files, temp_output_file.name) - with open(temp_output_file.name, 'rb') as f: + with open(temp_output_file.name, "rb") as f: video_bytes = f.read() output_url = s3.upload(video_bytes, "mp4") print(output_url) - + else: output_bytes = requests.get(output).content output_url = s3.upload(output_bytes, "mp4") - + return output_url, thumbnail_url diff --git a/app/animations/reel.py b/app/animations/reel.py index 622b6e7..a9d6444 100644 --- a/app/animations/reel.py +++ b/app/animations/reel.py @@ -29,8 +29,8 @@ def animated_reel(request: ReelRequest, callback=None): } # if voice is new, assign a random voice - if result['character']: - character_name = result['character'] + if result["character"]: + character_name = result["character"] if character_name not in character_name_lookup: characters[character_name] = Character(name=character_name) character_name_lookup[character_name] = character_name @@ -45,18 +45,18 @@ def animated_reel(request: ReelRequest, callback=None): width, height = 1920, 1088 else: width, height = 1600, 1600 - + min_duration = 25 speech_audio = None duration = min_duration if result["speech"]: if 
result["voiceover"] == "character": - character_id = character_name_lookup[result['character']] + character_id = character_name_lookup[result["character"]] character = characters.get(character_id) else: character = characters[request.narrator_id] - + if not character: voice_id = select_random_voice() else: @@ -64,11 +64,8 @@ def animated_reel(request: ReelRequest, callback=None): voice_id = character.voice else: voice_id = select_random_voice(character) - - speech_bytes = elevenlabs.tts( - result["speech"], - voice=voice_id - ) + + speech_bytes = elevenlabs.tts(result["speech"], voice=voice_id) speech_file = BytesIO(speech_bytes) speech_audio = AudioSegment.from_mp3(speech_file) @@ -79,12 +76,9 @@ def animated_reel(request: ReelRequest, callback=None): duration = max(min_duration, len(speech_audio) / 1000) - music_url, _ = replicate.audiocraft( - prompt=result["music_prompt"], - seconds=duration - ) + music_url, _ = replicate.audiocraft(prompt=result["music_prompt"], seconds=duration) music_bytes = requests.get(music_url).content - + if speech_audio: buffer = BytesIO() music_audio = AudioSegment.from_mp3(BytesIO(music_bytes)) @@ -120,24 +114,29 @@ def animated_reel(request: ReelRequest, callback=None): callback(progress=0.9) if request.intro_screen: - character_names = [characters[character_id].name for character_id in request.character_ids] + character_names = [ + characters[character_id].name for character_id in request.character_ids + ] character_name_str = ", ".join(character_names) paragraphs = [ request.prompt, f"Characters: {character_name_str}" if character_names else "", ] intro_screen = utils.video_textbox( - paragraphs, - width, - height, - duration = 6, - fade_in = 1, - margin_left = 25, - margin_right = 25 + paragraphs, + width, + height, + duration=6, + fade_in=1, + margin_left=25, + margin_right=25, ) video_files = [intro_screen, output_filename] - with tempfile.NamedTemporaryFile(delete=True, suffix=".mp4") as temp_output_file: + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".mp4", + ) as temp_output_file: utils.concatenate_videos(video_files, temp_output_file.name) with open(temp_output_file.name, "rb") as f: video_bytes = f.read() diff --git a/app/animations/story.py b/app/animations/story.py index b92ee41..d82dcf3 100644 --- a/app/animations/story.py +++ b/app/animations/story.py @@ -16,7 +16,7 @@ def animated_story(request: StoryRequest, callback=None): screenplay = story(request) - + music_prompt = screenplay.get("music_prompt") if callback: @@ -35,10 +35,10 @@ def animated_story(request: StoryRequest, callback=None): } # if any character is new, assign a random voice - for clip in screenplay['clips']: - if not clip['character']: + for clip in screenplay["clips"]: + if not clip["character"]: continue - character_name = clip['character'] + character_name = clip["character"] if character_name not in character_name_lookup: characters[character_name] = Character(name=character_name) character_name_lookup[character_name] = character_name @@ -55,12 +55,16 @@ def animated_story(request: StoryRequest, callback=None): def run_story_segment(clip, idx): nonlocal progress, progress_increment if clip["voiceover"] == "character": - character_id = character_name_lookup[clip['character']] + character_id = character_name_lookup[clip["character"]] character = characters.get(character_id) else: character = characters[request.narrator_id] output_filename, thumbnail_url = screenplay_clip( - character, clip["speech"], clip["image_prompt"], width, height + character, + 
clip["speech"], + clip["image_prompt"], + width, + height, ) progress += progress_increment if callback: @@ -68,7 +72,9 @@ def run_story_segment(clip, idx): return output_filename, thumbnail_url results = utils.process_in_parallel( - screenplay["clips"], run_story_segment, max_workers=MAX_WORKERS + screenplay["clips"], + run_story_segment, + max_workers=MAX_WORKERS, ) print("results...") @@ -78,20 +84,22 @@ def run_story_segment(clip, idx): thumbnail_url = results[0][1] if request.intro_screen: - character_names = [characters[character_id].name for character_id in request.character_ids] + character_names = [ + characters[character_id].name for character_id in request.character_ids + ] character_name_str = ", ".join(character_names) paragraphs = [ request.prompt, f"Characters: {character_name_str}" if character_names else "", ] intro_screen = utils.video_textbox( - paragraphs, - width, - height, - duration = INTRO_SCREEN_DURATION, - fade_in = 2, - margin_left = 25, - margin_right = 25 + paragraphs, + width, + height, + duration=INTRO_SCREEN_DURATION, + fade_in=2, + margin_left=25, + margin_right=25, ) video_files = [intro_screen] + video_files @@ -100,12 +108,11 @@ def run_story_segment(clip, idx): audio_file = None if music_prompt: - duration = sum([utils.get_video_duration(video_file) for video_file in video_files]) - - music_url, _ = replicate.audiocraft( - prompt=music_prompt, - seconds=duration + duration = sum( + [utils.get_video_duration(video_file) for video_file in video_files], ) + + music_url, _ = replicate.audiocraft(prompt=music_prompt, seconds=duration) print(music_url) response = requests.get(music_url) @@ -130,8 +137,15 @@ def run_story_segment(clip, idx): with tempfile.NamedTemporaryFile(delete=True, suffix=".mp4") as temp_output_file: utils.concatenate_videos(video_files, temp_output_file.name) if audio_file: - with tempfile.NamedTemporaryFile(delete=True, suffix=".mp4") as temp_output_file2: - utils.mix_video_audio(temp_output_file.name, audio_file.name, temp_output_file2.name) + with tempfile.NamedTemporaryFile( + delete=True, + suffix=".mp4", + ) as temp_output_file2: + utils.mix_video_audio( + temp_output_file.name, + audio_file.name, + temp_output_file2.name, + ) with open(temp_output_file2.name, "rb") as f: video_bytes = f.read() else: diff --git a/app/character.py b/app/character.py index d60ce04..8f9453f 100644 --- a/app/character.py +++ b/app/character.py @@ -75,7 +75,7 @@ def __init__( self.chat = LLM(params=self.chat_params) self.knowledge_summary = "" - + self.update( name=name, identity=identity, @@ -89,7 +89,6 @@ def __init__( image=image, voice=voice, ) - def update( self, @@ -113,30 +112,34 @@ def update( self.creation_enabled = creation_enabled self.story_creation_enabled = story_creation_enabled self.concept = concept - self.smart_reply = False # smart_reply # disabled until ready + self.smart_reply = False # smart_reply # disabled until ready self.chat_model = chat_model self.image = image self.voice = voice self.function_map = {"1": self._chat_} - options = ["Regular conversation, chat, humor, small talk, or a asking for a question or comment about an attached image"] + options = [ + "Regular conversation, chat, humor, small talk, or a asking for a question or comment about an attached image", + ] if knowledge: if not self.knowledge_summary.strip(): - self.knowledge_summary = summary( - SummaryRequest(text=self.knowledge) - ) + self.knowledge_summary = summary(SummaryRequest(text=self.knowledge)) options.append("A question about or reference to your 
knowledge") knowledge_summary = ( f"You have the following knowledge: {self.knowledge_summary}" ) self.function_map[str(len(options))] = self._qa_ - + if creation_enabled: - options.append("A request for an image or simple video creation that isn't a story") + options.append( + "A request for an image or simple video creation that isn't a story", + ) self.function_map[str(len(options))] = self._create_ if story_creation_enabled: - options.append("A request to help write or draft a story, or to animate a finished story or turn it into a movie or film.") + options.append( + "A request to help write or draft a story, or to animate a finished story or turn it into a movie or film.", + ) self.function_map[str(len(options))] = self._story_create_ if len(options) == 1: @@ -146,7 +149,8 @@ def update( for i, option in enumerate(options): options_prompt += f"{i+1}. {option}\n" self.router_prompt = router_template.substitute( - knowledge_summary=knowledge_summary or "", options=options_prompt + knowledge_summary=knowledge_summary or "", + options=options_prompt, ) self.identity_prompt = identity_template.substitute( @@ -185,7 +189,6 @@ def update( self.chat.update(system_message=self.chat_prompt) self.reply.update(system_message=self.identity_prompt) - def __str__(self): def truncate(s): return (s[:47] + "...") if len(s) > 50 else s @@ -204,10 +207,7 @@ def truncate(s): ) def card(self): - return character_card.format( - name=self.name, - identity=self.identity - ) + return character_card.format(name=self.name, identity=self.identity) def think( self, @@ -220,8 +220,8 @@ def think( chat=message, ) result = self.reply( - prompt=user_message, - output_schema=Thought, + prompt=user_message, + output_schema=Thought, save_messages=False, model="gpt-4-1106-preview", ) @@ -232,7 +232,6 @@ def think( return R < probability - def _route_( self, message, @@ -244,9 +243,9 @@ def _route_( role = "Eden" if msg.role == "assistant" else "Me" router_prompt += f"{role}: {msg.content}\n" router_prompt += f"Me: {message.message}" - router_prompt = router_prompt[-5000:] # limit to 5000 characters + router_prompt = router_prompt[-5000:] # limit to 5000 characters index = self.router( - prompt=router_prompt, + prompt=router_prompt, save_messages=False, model="gpt-4-1106-preview", ) @@ -257,7 +256,6 @@ def _route_( else: return None - def _chat_( self, message, @@ -266,20 +264,19 @@ def _chat_( response = self.chat( prompt=message.message, image=message.attachments[0] if message.attachments else None, - id=session_id, + id=session_id, save_messages=False, model=self.chat_model, - ) + ) user_message = ChatMessage(role="user", content=message.message) assistant_message = ChatMessage(role="assistant", content=response) output = {"message": response, "config": None} return output, user_message, assistant_message - def _qa_(self, message, session_id=None) -> dict: response = self.qa( - prompt=message.message, - id=session_id, + prompt=message.message, + id=session_id, save_messages=False, model=self.chat_model, ) @@ -288,13 +285,12 @@ def _qa_(self, message, session_id=None) -> dict: output = {"message": response, "config": None} return output, user_message, assistant_message - def _create_( self, message, session_id=None, ) -> dict: - response = self.creator( + response = self.creator( prompt=message, id=session_id, input_schema=CreatorInput, @@ -333,29 +329,32 @@ def _create_( return output, user_message, assistant_message - def _story_create_( self, message, session_id=None, ) -> dict: - characters = 
self.story_context.memory( - "characters", - session_id=session_id, - ) or {} + characters = ( + self.story_context.memory( + "characters", + session_id=session_id, + ) + or {} + ) character_names = [c.lower() for c in characters] story_context_prompt = story_context_prompt_template.substitute( character_names=", ".join(character_names), - message=message.message + message=message.message, ) class ContextOutput(BaseModel): """ Any new names listed by the user """ + new_names: List[str] = [] response = self.story_context( @@ -366,14 +365,15 @@ class ContextOutput(BaseModel): ) new_names = [ - name for name in response["new_names"] + name + for name in response["new_names"] if name.lower() not in character_names ] for name in new_names: character = search_character(name) if character: - characters[name] = EdenCharacter(str(character['_id'])) + characters[name] = EdenCharacter(str(character["_id"])) self.story_context.memory( "characters", @@ -385,10 +385,13 @@ class ContextOutput(BaseModel): for name, character in characters.items(): additional_context += f"\n---\n{name}: {character.identity}\n" - draft = self.story_context.memory( - "draft", - session_id=session_id, - ) or "none" + draft = ( + self.story_context.memory( + "draft", + session_id=session_id, + ) + or "none" + ) story_editor_prompt = story_editor_prompt_template.substitute( draft=draft, @@ -400,6 +403,7 @@ class StoryEditorOutput(BaseModel): """ Response from the story editor """ + new_draft: str message: str request_animation: bool @@ -407,10 +411,10 @@ class StoryEditorOutput(BaseModel): response = self.story_editor( prompt=story_editor_prompt, id=session_id, - #input_schema=CreatorInput, + # input_schema=CreatorInput, output_schema=StoryEditorOutput, model="gpt-4-1106-preview", - #model="gpt-3.5-turbo", + # model="gpt-3.5-turbo", ) draft = self.story_context.memory( @@ -418,11 +422,11 @@ class StoryEditorOutput(BaseModel): session_id=session_id, value=response.get("new_draft"), ) - + request_animation = response.get("request_animation", False) message_out = response.get("message") - + if request_animation: characterIds = [characters[c].character_id for c in characters] @@ -434,7 +438,7 @@ class StoryEditorOutput(BaseModel): } else: - message_out += "\n\nHere is the current working draft:\n\n"+draft + message_out += "\n\nHere is the current working draft:\n\n" + draft config = None message_in = message.message @@ -444,14 +448,10 @@ class StoryEditorOutput(BaseModel): user_message = ChatMessage(role="user", content=message_in) assistant_message = ChatMessage(role="assistant", content=message_out) - output = { - "message": message_out, - "config": config - } + output = {"message": message_out, "config": config} return output, user_message, assistant_message - def __call__( self, message, @@ -491,17 +491,18 @@ def __call__( system=self.chat_prompt, params=self.chat_params, ) - + function = None if self.router_prompt: index = self._route_(message, session_id=session_id) function = self.function_map.get(index) - + if not function: function = self.function_map.get("1") output, user_message, assistant_message = function( - message, session_id=session_id + message, + session_id=session_id, ) self.router.add_messages(user_message, assistant_message, id=session_id) @@ -510,7 +511,7 @@ def __call__( self.story_context.add_messages(user_message, assistant_message, id=session_id) self.qa.add_messages(user_message, assistant_message, id=session_id) self.chat.add_messages(user_message, assistant_message, id=session_id) - + return output @@ 
-527,7 +528,7 @@ def sync(self): """ Sync the character data from the database """ - + character_data = get_character_data(self.character_id) logos_data = character_data.get("logosData") name = character_data.get("name") @@ -537,7 +538,9 @@ def sync(self): concept = logos_data.get("concept") abilities = logos_data.get("abilities") creation_enabled = abilities.get("creations", False) if abilities else False - story_creation_enabled = abilities.get("story_creations", False) if abilities else False + story_creation_enabled = ( + abilities.get("story_creations", False) if abilities else False + ) smart_reply = abilities.get("smart_reply", False) if abilities else False chat_model = logos_data.get("chatModel", "gpt-4-1106-preview") image = character_data.get("image") diff --git a/app/creation_interfaces/kojii_chebel.py b/app/creation_interfaces/kojii_chebel.py index 3a9aeb6..c0fba97 100644 --- a/app/creation_interfaces/kojii_chebel.py +++ b/app/creation_interfaces/kojii_chebel.py @@ -10,18 +10,22 @@ class Number(Enum): one = "one" many = "many" + class AspectRatio(Enum): portrait = "portrait" landscape = "landscape" + class ColorType(Enum): color = "color" black_and_white = "black and white" + class KojiiChebelRequest(BaseModel): """ A request for Chebel Kojii endpoint """ + number: Number aspect_ratio: AspectRatio abstract: float @@ -36,25 +40,29 @@ def kojii_chebel(request: KojiiChebelRequest, callback=None): negative_prompt = "saturated" else: color_mode = "black and white" - negative_prompt = "color, colors, yellow, orange, red, pink, purple, blue, green" - + negative_prompt = ( + "color, colors, yellow, orange, red, pink, purple, blue, green" + ) + if request.number == Number.one: prompt = f"in the style of , oil paint, {color_mode}, woman, dance, brush strokes" elif request.number == Number.many: prompt = f"in the style of , oil paint, {color_mode}, women, dance, brush strokes" - + if request.aspect_ratio == AspectRatio.portrait: w, h = 1024, 1536 elif request.aspect_ratio == AspectRatio.landscape: w, h = 1536, 1024 - + control_image = random.choice( - control_images[request.number.value][request.aspect_ratio.value] + control_images[request.number.value][request.aspect_ratio.value], ) control_image_strength = 0.95 guidance_scale = 6.5 - negative_prompt += ", ugly, watermark, text, tiling, blurred, grainy, signature, cut off, draft" + negative_prompt += ( + ", ugly, watermark, text, tiling, blurred, grainy, signature, cut off, draft" + ) config = { "mode": "controlnet", @@ -63,7 +71,7 @@ def kojii_chebel(request: KojiiChebelRequest, callback=None): "uc_text": negative_prompt, "lora": "https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/431ff8fb8edf1fcf8d1bc1ddcc2662479ced491c6b98784cdb4b0aa6d70cd09c.tar", "lora_scale": lora_scale, - "control_image": f'https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/{control_image}', + "control_image": f"https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/{control_image}", "control_image_strength": control_image_strength, "width": w, "height": h, @@ -73,238 +81,238 @@ def kojii_chebel(request: KojiiChebelRequest, callback=None): "steps": 42, "seed": random.randint(0, 1000000), } - + image_url, thumbnail_url = replicate.sdxl( config, - model_version="abraham-ai/eden-sd-pipelines-sdxl:a541eb2e611fc66ccd0faac634a7f7c0c7684977b7b54d0eb599ff352f343884" + model_version="abraham-ai/eden-sd-pipelines-sdxl:a541eb2e611fc66ccd0faac634a7f7c0c7684977b7b54d0eb599ff352f343884", ) return image_url, thumbnail_url control_images = { - "one": { - "landscape": 
[ - "7bd9892213aad252f91a9c1a4f678a0ef9c9060682c5de31780ebbbaf751afa6.jpg", - "b3e536e51c7f3eb5b256f8e3bf740c1a38e9f9c4f8a7e4a85f461b0756e5013a.jpg", - "bc167fd234c3c64cee3c575d08f70a1cb4c294cf3f8cf0608225ec6957fe6b07.jpg", - "0a3ec22a9c01aae19bbbcd844cb3a09b9026fc142ce475d1a3c0acd4490e7f95.jpg", - "cdc9b1538c7ae8e0f5937ada79290a1fa686ae6de897e615a6b480f24f78b639.jpg", - "32c8821632ec29bf076c81f95f3c8c6cd8cb78a0f3e23eec685ca2fee2c993d6.jpg", - "6dffc79ec9a1ded80eb1c4d344614f531a7d9f179733e200411407ebe024b74d.jpg", - "02d8c48b76747f5ba05fbcc7e259e123eb75d606cceb8becdcb23b59be7169bb.jpg", - "f78899a5676c61480104161564e0f2b47abf32bc537d4f1ae0c4ecfaded7f339.jpg", - "4444eee464e11f7bbd9a8fdaeec07360368fc30ab5973cc83d9309c13a19185a.jpg", - "5ab8cb1db96f932d69221d91ed89f1703d260f6d99997d94dcbb8fa8192c6c2d.jpg", - "8a6f3aa6ada415f0e1d1ad0c0590d7baf8fe0542af6da88514daf9644cd76062.jpg", - "aeb8dd4096082e9b2ec38e74626f9258efdb04ae7b75f1fa7afe3a70c6a3cb93.jpg", - "2baa2427f7994ce185ddadf1331e800d208fea2b68e5c5d43b56acad2194f715.jpg", - "1cae6e6b15c9c19b7317a6c899fdd8c8a9802fdfc9aaf61acc160be31216aa75.jpg", - "d63f5769217a8b5d071294b0a14f2e6d34f4b4a79397ae062810841c36759a82.jpg", - "cf7cb38faea5ca81b970564282d16f3ec56112ca0b7499b3af9167b0ea6e1b9a.jpg", - "008816a11d388d211c2dc734577d80465ff3719864874353c90d24fb9046a969.jpg", - "d855ea3d1f62ca8f6c5eeef3349106901084e45c94afa5426350c71bb3e7b2c9.jpg", - "ff0528b4b2b64c20f80a6c047b85f80de7734c5ada6444165199ed513fd275c6.jpg", - "520b020c7ac04979eb68724ebdb75ef4cdf7d21af9ff7b4a34e4a277958aba68.jpg", - "5a8bc0fa965fe4b6421e4a3d76f52213f2635d6204c5ad2cfcd92fc4590d9a45.jpg", - "fa37fcbe28e6022ff9c238313f63be037946aeacffb9d2a9b0af40956b755bc2.jpg", - "8d59633a3d5b2d92ee3c02c6dd4db083f9ba8b515c0ba2a521eb91eb15cd31bc.jpg", - "11ac91c70f892a0f393a1b773d1251d099bd7724814417e4f8ff808838423a33.jpg", - "f66d92392f95b970c64013210bc29aa5a1ac0e385a81dc6827c53c1825237689.jpg", - "f6a9d7a2c3cbe19081644aee5f7f03ad85f9062f06ea647658006e8f4d313e5b.jpg", - "9ea9b48da25670cf1f3027095c52fd5a4fa076dcec15d54fb3380ad4225ddb24.jpg", - "d3979bbec7f2414e5e8643a23e45b4676b9016bb7944a5ee9ab3286ff715621e.jpg", - "b86ebebfa023d776cf66f8117aad03032344205ad344a2f84180da96879af09f.jpg", - "75d5b41c26e45573c48b88f42901dcf9eafbcf5a7b6a4e6ac9a70b819b2a54af.jpg", - "b2433891140cbc62828954fda4f8fea78ed1688d5b72f5bd73efdfce50e60c7d.jpg", - "84a80e2ba46bfaac52ce6ab47d6b74e86973a8cbe9b9a0e0a7750f7a14e9eeb7.jpg", - "51ffbeef0e328bd389d0a995a9affc5d99ecb813e6ec3e7f92a8d2d65223524e.jpg", - "962212572adbc1c2431c15e743c3b7a5c476f1941ee2ac7a991829717073a675.jpg", - "6da8ea6e89ea912f02d9dee1603e597099990e1a15ce7ddd39767c04ae7791a7.jpg", - "cf7638aea862c976144966a89d07c14515385cb7099b675421b0a0df7b8d2a42.jpg", - "3232bbf53d61231b286102f33d0451006ed31849e2474c58093b6fe947631d19.jpg", - "6f75af9697aee84680b55952923d161ca79101d61942888673f6e35a424b0298.jpg", - "8bf9d95c87c10243a18171ccf13d3ef4ca300f33699b1258f999165f79c1faa8.jpg", - "ec267b2936503f3af56cd16b0b5af4e13966097207564cb77687f44f0f23f439.jpg", - "7327928fd90a4b8306ef0bd89f8b8b8927122ae31f4bedbbcd4ce76474084bc6.jpg", - "a9ebfa67e258c7bb2df5ffb3e9fff837dc0228d34a1528052c94a57b5784ac26.jpg", - "7bf87d3df1348ce5a850ac1cec5e2ccb37b6853e6233ece97d96c1f5e35e64d7.jpg" - ], - "portrait": [ - "bb9d0983e89491dfd18eca8f7157715f19f6513ec56f031398ca1786ce75855c.jpg", - "fe8aff11ad70f030a093b25142b9a079764d538374ed128f00250f5b5af6ec73.jpg", - "2e4a118c0d7eb7147c35104ab56a69d4c8642927be4f96dfb402955be2603053.jpg", - 
"b703398037ff1a9b2c03e3bb6e35ac0f5b50f087b07ed76cdc885af8cb60ec91.jpg", - "ce6753f069d4615e415a53d63c09469eb0c749c459107b02d25f67bd8bc1f97a.jpg", - "dea5510c9f78ff47e38f3aa8a5799874448abe6024ebfa4ad377aaf4d4308adb.jpg", - "3c50e2fd251325a273f9aad6bbcb382b1be674299b1fe55be6bb3e9b5ad04ca0.jpg", - "a194cf9d545f9965be86bdbc6f34b9b0b74546a04f5fa9760bda511f7a97fb7c.jpg", - "a762dcd6878ec33edc76632bd777de18f825106cba0e5d6c6d30d94b526a05ea.jpg", - "9d32e406190afab64254db9b2d59d64a5a63ae6e6c53d65b6ccc05e0736c67e5.jpg", - "0bbcd41d3c972bfd15f4b83a9a63ef342c178f8a38e7439048510527532f3cba.jpg", - "915d636c728c0d15119fca38d1296c6d240045df31691d9ab34debfecd6d3833.jpg", - "08bf6de26eb97d66cea3fc40a87a22cf9e970c874263b45a382c57820fe6ad4c.jpg", - "cf06c74c5dae4e08bcf98670dbe7d71f1e66574d35a1ea80244e3353f3a8a6b3.jpg", - "d7255857bba8c7cfb3f3f4104b284d2e0292ea536716eea74a3e4a247077f396.jpg", - "43059dae688b95425591cd22a7e6b00f3546c4cebfd6a3f8b7a23a007402265c.jpg", - "5be4869cedc73bf35a9a1d311880793405b1b929c2df1002bfadbfcef119769c.jpg", - "34f7be53ddb584ed7d3afa2f2c3cb68ce20148a5af9f2a39d6e05ac48c5bff64.jpg", - "afdf69fccc18b9659fe5a229511fe4dbc30622677e3fdf8bfe2276b0b02ed7b4.jpg", - "9947627548c00e8b82acc8f0dfc56c022cdee1b9192aae8d840e1c79b8e51424.jpg", - "294eb7357f5cfc1eaaa3a8cb2062b84b8db1e368931b67fd8e1148e77726883a.jpg", - "81cff9ddd2f835bf6ca3725229f508b8da0976b9d7a092a06bd93fdeff56ee5b.jpg", - "debfbbd9c90ceb3474c3bd19e4ecf7c57c958e90cd25b915ae085b366a8db24d.jpg", - "568d0b3deda19a47dceef35d4527d5a9d4e61cfa9782447841ce2cf8298c2f62.jpg", - "3b7556d1c5aff809b67ea957c85b304727f29cdaf62568e2713d9d493014aba3.jpg", - "a79bd60a069459b5c29bd2bfef44da077fea42dac02b97d1ff2d516afa24a10a.jpg", - "b5175e269a155ca86e50aab356a2a9bbbc3b5f5848bdb74edbfda52f8d5bdff9.jpg", - "7ef81b92d7602345b2fc2d1bde52741f453f82a0a08c3881281d9a928a8b53fa.jpg", - "3a17448b0f54cf4e6381cc934731a2734af31debe443e14a6c9dd68fbd2d2db4.jpg", - "1b19b139a447fecb2a4692d1541faf5d9465f1c9370b683ae9bfaec43247f39a.jpg", - "565e6463e2dba76377b13dfda546cb76b8bb2eb7d7228f7e2191ef4bf6f6a721.jpg", - "5badf0e3da37b5945de15a04a5ca1d05bcb9e25bf50aa52b00580604cdaaf326.jpg", - "bba9cf8d6b0b2afcc07d4e286b1897285bb69428a881b42815a1127c9a579053.jpg", - "1976f2c658e1672eaf54c466a3d5d66bcbacd1fed259e850dc597abda0d23143.jpg", - "8327b4776603bcd36cf76d60389b71413fe31cd838e243a63db791ab4f1c1851.jpg", - "1142850a7535b20cefdf36379474e1da06986e073c01ba9c6ab2e28ebf0bfba8.jpg", - "3777f4bab36ba99c71ea3ec71022ee30045f679fb4c088e9743c701606bd35f8.jpg", - "296476fd4148b07f6346417d921047a48e7c85656942d0e9902645924339a7c3.jpg", - "0859e28957c571d0775aab681b710d1eaa58a2de8effcde104093829108ab141.jpg", - "8226a0f776654e02c36fd0eba51f595f8ac7132984b36a1c2802a6aad6060dd3.jpg", - "873fafbf9bdd288aa3b20fc22fe7b38e896bc6874722e1ad5b9dadfd51f2452b.jpg", - "d6cf98d0907a555b3ac88db1e62fdd0a509d9738179daf307a9460e0993fbbbd.jpg" - ] - }, - "many": { - "landscape": [ - "05641ef30421cd6a3b325a8a33500b22c56ff7f2b664ac8eba169c48fe259dc2.jpg", - "c2042054e38cc20eaec09e19fb601e9b78a12d1a04d7575247316ddfbd392951.jpg", - "b61676aa9ebaf41f5c96a3b8304d84e29717ec897aff377dda1a6f8291d461a6.jpg", - "b6c4b00df3909712cf31797ae4158332939b35a2b5c5de1b8bcddead81649fe5.jpg", - "5b077b165933f80dd63000416a90d688310562f61ae9322fcdbd51962630c4f3.jpg", - "d05861f351375141cb83d12e330509f2bf12af3def333ee3537c07ba046cb370.jpg", - "dd1a75d46c555219a4f33dd4322b9a2c98e8ec4c9a2d46d4b96de41e8f210d81.jpg", - "ad74b11bb850ae0e4ce1fc258db0f4f206260103ac9103d75378240325019042.jpg", - 
"c281264ecac3352179406305110faf63d8f4ac8c4a3bfda17b7e5bf9a29a04ae.jpg", - "ff1478b21b0d9253fdc9a7513f8a48163ef6e8bdd74cd29b0d14efe0226c946f.jpg", - "089ad34db78dd4ba40a898fe5f182cc78e4baee54720127fca8b83b162a35fa7.jpg", - "1924e6677892c41b8bf5faa53a94932f415c523e3e8252a9c6e9e647eb532540.jpg", - "728a4d7728b1b954df70d34de20af6a4e0d626743fb1cb3a287e94a2a265c132.jpg", - "c5b2ff6d278ae1ba1bc73577ef5d104c96f24ce8f01cf2870459b15e39aa621f.jpg", - "f6d8fcdc4973dd7920672cbf0e2e6cb4353024d39af986cecd00a5923443cdec.jpg", - "71f98760c2e99777513784c2058d7287ec1c5d91bee9584fc261ac965cae10a2.jpg", - "58bbec94c6c08179576f57861dc1476b80133963df9b9ff0aa884073ee315d36.jpg", - "4729eb99bc925336c78391fffd6f873308117f546737accb528e9e3825865e97.jpg", - "3e824c591706940669b9e40d7aa2b987149c8eaa0650a31c984409d21278cdd3.jpg", - "51529aee4dcd95f70bdd79eb4e16544515905ba61a2e1e4fb6db74ea22bef969.jpg", - "3de238ddb2c8041688b49b23bf70e18507738caa68e7d106d120d6e45813f049.jpg", - "dd0653bcc4572efd0c12627cfe48c810c99a57622eb9fb1b52ec2299df813678.jpg", - "73cb10c4c09c1bf0d4723cb5a32aaade49f081cf9677db27f6b2bdeb64722b0c.jpg", - "b6b9661466e37e4d40d6548fc031983c9dbe83f5799d8fd49c5b199cd9cae468.jpg", - "25bcc021607aad141184ae75f0dbc4e78ccdeb1af04a0349d57f06c70e9c2154.jpg", - "156d472ccc9f2f0326cb2189e4e69e0434fce43696904c60ec42bbc688cfeab6.jpg", - "e8631010c2c0fc42614565245ec081d1b670d9444cfe6fbea89ba389873298bc.jpg", - "4ec00fe8545f42ce2e15b11e741d3d7103ab5c3a8d188a9bd177fb5706ae35c9.jpg", - "c746a655f50517d01fe02331a844613efe62e2f6b5e26ad208cdeae624a84cbb.jpg", - "d862bcaf3473945aaac288d007b7872d9e3c2514256ab8f9345d40c0b21a06ca.jpg", - "68c1e661c825bd4a0f891a6a02e20037faedea36b4c02466ee8364023761e882.jpg", - "da83313c9967f80ffa00a20a22906fdca020e5c6f5f27120c099c63ed70c217c.jpg", - "f464606e7151831b1aae45ddf8a2113b8dbd8b2ae3c1ac84fa7f7b060d9e3e55.jpg", - "4188eb3b8b9644d6f4f2b43f3ebc5f0fddef228b26fc299bbb88105f9b6ab1ce.jpg", - "06f66eda10a8625911366266f894bd80cec12a42dfdc858b3a1f120dbe7a72e6.jpg", - "bd9c8dd95733dff98d05a5f79aeb8e47d11cfc11f9f81976deaf5ec88cac93cb.jpg", - "8dc132ab7c3fb6ee1a9a58d1381349433e15bbff288f98fb4d93dd41c143067c.jpg", - "a0623764df54c930aaab12081af85bac527a7a4d63fb93297c7872f961ffcd79.jpg", - "e0f45fa9d5243be43662faa9273799a9ebc8bc83f36fe51e5096e42043b6b633.jpg", - "5f7c73ea5acf1fcbdfb4c37d1132ab54e4554b7d71ae04e38caf004d9eed8ae1.jpg", - "cc7563217d8f3b3afb35b6b6556fb7eede3efe935f07d714b65cdac72e9e811c.jpg", - "13b1cbfb97752b36ff4f0948dc2c82643f5404bc71269e0e32b89b10665b02b4.jpg", - "163f32cb04b95a761c66404124e86a68d3e5138e65c30fdd00b53e07d080b4f5.jpg", - "354bccf88468e0f06ea06c973ecc227a32bd2ccd3dd7f40e47b6a4524ab7ee8c.jpg", - "b93a172dfc2932c48c41d0a69217656c654dc8b20e40dd24f2da2ee69babb794.jpg", - "8696872ae4710d6057eed1847fef5a47ea3170134c536dfa8ff9827ae48d03fc.jpg", - "968229c57062378ef7a066c9d6b5216dfce6f68f7718897bbfe2f440802272dd.jpg", - "582f4ab218aeb6d372eb569dfd85e2402a03a5c57b6f7c551720f4d372f57791.jpg", - "9a042652a4522eb7062c0868f1fb3e986f27f32bcb4ec3d44f468a695b58d3db.jpg", - "19c6e813f39b865a0fab018c561f94ceff72cbbbe760d298b3837edc0977aff8.jpg", - "8a9705f8d278606e4de21a855d899d8388f459df065d9b1cfece1810a8c9c2ac.jpg", - "be61c3b17109d88cdb3a53301d1dafa248d313a27b751da14ddf40631442e6c6.jpg", - "d04315142f95a53d0aeb929cd8ef977dd871b12256e2ae654ebc99b7c356df5b.jpg", - "06ce78c9f3677a201ad456c0d90f2cc4a856547991d3a5588f4fd28ce9fee3bb.jpg", - "45fea6bad88dff18fde37b144ecc387b86fd26932d88cfcd4c65e8b586c97b86.jpg", - "87ea173c679785a160710fd1bf04ab9bf9b93dca01a4c479c86ea9c43e2d5f76.jpg", - 
"33c0e6456fe7a97615db555995a76d0b39de2346ea11298ec225ebb16381f42f.jpg", - "e0c305bbbe79a7d9553d41d0fc2bf6fdb99d559bfbea73dbf7ab482cdc054d35.jpg", - "d8e0f64d379dbeae6ebb16d128cf3a020f14d7392e07907e74d9c8bc3ee552af.jpg", - "c210d6d99e6d81a96d681349983e9f30e624acb49b240a938bab256eaf3b6e22.jpg", - "852e2df979af0ca1d1c01eced84db84aa73667323f6c3252029847fc4dd3da49.jpg", - "a256cbefe27a435c9be482ca6c1cfffdd5093c681fbc9abc518529690fff77ee.jpg", - "a34d74d2c481189eba7732826f372fdbaf83b01a3fc61069dd2ea5ca252ec208.jpg", - "8e366993f71e6c790782bfdc6242d57f60d5e067b11927c407f75923a293e0b1.jpg", - "b220d46d819d9f032833e1557a5e92cc4e035fcfdd2d9d779aca7ff48720b17c.jpg", - "97d8d3be8baaaefc27088770b3cdaec45b1b9d66a5b066fd8b17c14ad64fb595.jpg", - "01eb1ad0b6c4b91fefc45376d7d1b696809258e4a46b4cdbd736695359a7df2f.jpg", - "0c652983da87188842dc0f7bb1b96bc94263c8efdb52b5b33b214e235233b3f0.jpg", - "6c6b42d7252586af098e828db8a033515ff5a734078d2d37882fa2e1193c7e0a.jpg", - "c3dcd27ed77be77be101f7a57d83e069bba4a8622720122eb90d45d6a88ff8bd.jpg", - "b809c72bc4e20d979811a16f830e412418c0cd93e4fccb93f3316116f83f0cfe.jpg", - "632615c4222e05010474488761ab0c78c5215e6c9eb0f7ce3b2d92766d34248f.jpg", - "0b68c73f2375ccf8ca8a93012ad010a6d13052bdf390ff0e2a0a5e178cc89d59.jpg", - "1e9f3b322bc12784ef49e47481ec1ecd3d0f163c98692436e42ee80c615495f3.jpg", - "3a7ec5965c85d68c6e603dad8372001d3ab9adf974a8a44d5e286fe87d7b603b.jpg", - "f18334e241198b2ac25be1e80fffefae64f59cf936b9e9998521f77eb5c21df1.jpg", - "bd68d4ebbe38f4990c6089de669994948c1c41c504a9031bf53b2777482bb01c.jpg", - "d845ad2ef885883457f837b6858982e99125480b8eca56d6c87887e8ad1b2429.jpg", - "9232e5f090a9309f09ba7f41981d8e247ffd018f86166a62b4b5231ac54a97ca.jpg", - "3a73caa7212ea590aa6d9d018d1507ff6dacc0c564ea461d15752d860b9524b9.jpg", - "6f8967985eb5d9fb4add6b942b5f9047a7c89bd0be0877a345afcffe2c8b160c.jpg", - "3e14e6687fb4ad24ee763bb9c85753f394a99d7eefb1b01ab7a4943d3f9d8d41.jpg", - "3b21135248a95cbf4e011f214fb7552d0d5d30dabbe0e7df6e8b1ad22a0edc51.jpg", - "c89482acf334da6b1bbd4700eee7846f21c7a99c05fe3585fd19aed4251d8932.jpg", - "a3885de0e4c1a2768c06ad48bdbca21b4c1642ff2ed2d3624e31f108eb7dc612.jpg", - "a4c00ea9d3e3fefeed2e8d3cb3c3620f4ce95788ce2b297161b27bd66312b183.jpg", - "4340dccfe5d6cc0c58257bb562984b0aea2001021b2bd2cdd8b1636330a19560.jpg", - "7420054f462c6b1ca34205c90c031c631c618e8b56efec6e77179db99395073e.jpg", - "12cfa907007cba10d71ef01a7003fb14b8759da0fb05d51e5d6c695d1d88fe6d.jpg", - "bf1eb9ee9c52995d3c722e4bd8d6c82a80aa493404ce2f38725ba61e9a50b494.jpg" - ], - "portrait": [ - "276da8f458841b376269f6537d703aecb5848158ccf5b84671c0a62a3d7a30bf.jpg", - "bab121cf00d898d783c998c2037d15ca8e52fa2e8d5d1a4e9dfdaf1b8c0bffab.jpg", - "00c83ef50e52aaf3744e11a9a7531d5adcf8730a2be5eab343e9d02131e460b2.jpg", - "a7138efeca57d652816f92583359d5058840b2525f19b7809eb20386d115341f.jpg", - "d112bb812787ed46408a30bdc153e9646901223f077f893820b789ac4dc79475.jpg", - "3ad93eaaeb5a824525cc4aa3234cf86e7a566a21b4cc605a1b1c7342f68bdede.jpg", - "bc0cccfc19ce432bc758cfba2181a84ef313dacb8fb9464e0c4daecea9c73295.jpg", - "0c54668d3354bbd8b340813984c579b0fae5659dc1210a2f5bb6b6af39602ebb.jpg", - "8f7cdf7684a6b846e9389bee5cb820eec0e69b02c05d89cf8a4c930c2f70ef26.jpg", - "c95e641fa2ba49458d8204cc3df0fa6ee9734896265acddc5688c0ac074ef713.jpg", - "886bb33fa2e797473becf227971c9274c8d9b70bf398da3e7e908ec4a81df41f.jpg", - "2a49c3f36c077439838a07a35ebf8a26dcb57b563ad9b78032e665b72ccfac38.jpg", - "05dd446c74f7dd04f805144ce163ed40c37e615fd0851837fce6b965a6f83ace.jpg", - 
"35a24c0c4e080efa77a7000d1bd5dbf3e7ab397d49c11e9f1302269588b0c9ec.jpg", - "6f9a292bbe13276dfdfeecf608c763ca6a58ac7b0cc93038e9bf0ee1149cc63c.jpg", - "41d0accf0e06703499d690a13a227a45eda8a69e7cbc0fb3e390716e6cd62276.jpg", - "23d137f0d638f749e8f4d79741c1e6fee155379626e788f970f1cd48a3f4e9d2.jpg", - "1514de8dc90e3dff7270699e1c873f314491b17f3bad1581b1422438e4c473b3.jpg", - "6f7f4a1a3e7582d19c49d31e4a80aaef3aa7c57629cbbc7de098a2ece70f8001.jpg", - "1e1509d548f8e6bbc0ba0a52af856fd5adc5e71805e31c1420914a3538ac8c66.jpg", - "8fa1358c6ed1fa70f67f2c2a75032d431bb948f2c2badd7d7bf5100106946ed1.jpg", - "d554a32a0d481a6b8229239378583596bc7b648dc080085ef30c24ada95a7842.jpg", - "d14d9b2869cfb891dd3863e7303094c599449c834b9946af4e2d7e6cdd634716.jpg", - "c714150314ca54fbcb077f465c0aca77e34cfcb8353c1511fab81a626548aba0.jpg", - "4deb6d90c4ba0060591991e23d0250b05d1dd89dbf4fa377add1c53f9c03d4eb.jpg", - "198cd903c801fd36e84a6f17cbb3721a9350e7c508d4850e4d1ac66042edd761.jpg", - "9d56a26c49ec07e1bfe0d561be3242b81b1b46099ec48abe226efbaf3213eaa4.jpg", - "9397b3ca92defb29cfe575ee52e7f27301a95d8f8fe3e5289401866ddc7e8e33.jpg", - "50d7ae5df425bbc30c9f4cad711c05e444ac692961f298caae063333ecbcceca.jpg", - "5b438777bf0684d3b9ffd9b369b0dca7b7c71a0ac985f161158146fec9b1eee0.jpg", - "06e08abe7113a0dac56a9ff4c4a9f01271bb34f68bd0663689652c5026db5045.jpg", - "3db4a5246b3b4667de126b689763720bd48866f3ab81fc0fc233dea5759a2c39.jpg", - "6f3d0a1b111f850650efc9798de985791a10e339c780cd4bc8b71ab6ce2145ba.jpg", - "e4a15e6b861ececfbe19cfac7d772f6260a0452de15a980ecd93baebfc7ebd25.jpg", - "436f5a8722ed0166c22a6463902d4a533b67cddddecade1c336a90d87be23793.jpg", - "5d7e6ccbef63a704ef144ee495a04b802c03e5168aad20003596385ebddd063c.jpg" - ] - } -} \ No newline at end of file + "one": { + "landscape": [ + "7bd9892213aad252f91a9c1a4f678a0ef9c9060682c5de31780ebbbaf751afa6.jpg", + "b3e536e51c7f3eb5b256f8e3bf740c1a38e9f9c4f8a7e4a85f461b0756e5013a.jpg", + "bc167fd234c3c64cee3c575d08f70a1cb4c294cf3f8cf0608225ec6957fe6b07.jpg", + "0a3ec22a9c01aae19bbbcd844cb3a09b9026fc142ce475d1a3c0acd4490e7f95.jpg", + "cdc9b1538c7ae8e0f5937ada79290a1fa686ae6de897e615a6b480f24f78b639.jpg", + "32c8821632ec29bf076c81f95f3c8c6cd8cb78a0f3e23eec685ca2fee2c993d6.jpg", + "6dffc79ec9a1ded80eb1c4d344614f531a7d9f179733e200411407ebe024b74d.jpg", + "02d8c48b76747f5ba05fbcc7e259e123eb75d606cceb8becdcb23b59be7169bb.jpg", + "f78899a5676c61480104161564e0f2b47abf32bc537d4f1ae0c4ecfaded7f339.jpg", + "4444eee464e11f7bbd9a8fdaeec07360368fc30ab5973cc83d9309c13a19185a.jpg", + "5ab8cb1db96f932d69221d91ed89f1703d260f6d99997d94dcbb8fa8192c6c2d.jpg", + "8a6f3aa6ada415f0e1d1ad0c0590d7baf8fe0542af6da88514daf9644cd76062.jpg", + "aeb8dd4096082e9b2ec38e74626f9258efdb04ae7b75f1fa7afe3a70c6a3cb93.jpg", + "2baa2427f7994ce185ddadf1331e800d208fea2b68e5c5d43b56acad2194f715.jpg", + "1cae6e6b15c9c19b7317a6c899fdd8c8a9802fdfc9aaf61acc160be31216aa75.jpg", + "d63f5769217a8b5d071294b0a14f2e6d34f4b4a79397ae062810841c36759a82.jpg", + "cf7cb38faea5ca81b970564282d16f3ec56112ca0b7499b3af9167b0ea6e1b9a.jpg", + "008816a11d388d211c2dc734577d80465ff3719864874353c90d24fb9046a969.jpg", + "d855ea3d1f62ca8f6c5eeef3349106901084e45c94afa5426350c71bb3e7b2c9.jpg", + "ff0528b4b2b64c20f80a6c047b85f80de7734c5ada6444165199ed513fd275c6.jpg", + "520b020c7ac04979eb68724ebdb75ef4cdf7d21af9ff7b4a34e4a277958aba68.jpg", + "5a8bc0fa965fe4b6421e4a3d76f52213f2635d6204c5ad2cfcd92fc4590d9a45.jpg", + "fa37fcbe28e6022ff9c238313f63be037946aeacffb9d2a9b0af40956b755bc2.jpg", + "8d59633a3d5b2d92ee3c02c6dd4db083f9ba8b515c0ba2a521eb91eb15cd31bc.jpg", + 
"11ac91c70f892a0f393a1b773d1251d099bd7724814417e4f8ff808838423a33.jpg", + "f66d92392f95b970c64013210bc29aa5a1ac0e385a81dc6827c53c1825237689.jpg", + "f6a9d7a2c3cbe19081644aee5f7f03ad85f9062f06ea647658006e8f4d313e5b.jpg", + "9ea9b48da25670cf1f3027095c52fd5a4fa076dcec15d54fb3380ad4225ddb24.jpg", + "d3979bbec7f2414e5e8643a23e45b4676b9016bb7944a5ee9ab3286ff715621e.jpg", + "b86ebebfa023d776cf66f8117aad03032344205ad344a2f84180da96879af09f.jpg", + "75d5b41c26e45573c48b88f42901dcf9eafbcf5a7b6a4e6ac9a70b819b2a54af.jpg", + "b2433891140cbc62828954fda4f8fea78ed1688d5b72f5bd73efdfce50e60c7d.jpg", + "84a80e2ba46bfaac52ce6ab47d6b74e86973a8cbe9b9a0e0a7750f7a14e9eeb7.jpg", + "51ffbeef0e328bd389d0a995a9affc5d99ecb813e6ec3e7f92a8d2d65223524e.jpg", + "962212572adbc1c2431c15e743c3b7a5c476f1941ee2ac7a991829717073a675.jpg", + "6da8ea6e89ea912f02d9dee1603e597099990e1a15ce7ddd39767c04ae7791a7.jpg", + "cf7638aea862c976144966a89d07c14515385cb7099b675421b0a0df7b8d2a42.jpg", + "3232bbf53d61231b286102f33d0451006ed31849e2474c58093b6fe947631d19.jpg", + "6f75af9697aee84680b55952923d161ca79101d61942888673f6e35a424b0298.jpg", + "8bf9d95c87c10243a18171ccf13d3ef4ca300f33699b1258f999165f79c1faa8.jpg", + "ec267b2936503f3af56cd16b0b5af4e13966097207564cb77687f44f0f23f439.jpg", + "7327928fd90a4b8306ef0bd89f8b8b8927122ae31f4bedbbcd4ce76474084bc6.jpg", + "a9ebfa67e258c7bb2df5ffb3e9fff837dc0228d34a1528052c94a57b5784ac26.jpg", + "7bf87d3df1348ce5a850ac1cec5e2ccb37b6853e6233ece97d96c1f5e35e64d7.jpg", + ], + "portrait": [ + "bb9d0983e89491dfd18eca8f7157715f19f6513ec56f031398ca1786ce75855c.jpg", + "fe8aff11ad70f030a093b25142b9a079764d538374ed128f00250f5b5af6ec73.jpg", + "2e4a118c0d7eb7147c35104ab56a69d4c8642927be4f96dfb402955be2603053.jpg", + "b703398037ff1a9b2c03e3bb6e35ac0f5b50f087b07ed76cdc885af8cb60ec91.jpg", + "ce6753f069d4615e415a53d63c09469eb0c749c459107b02d25f67bd8bc1f97a.jpg", + "dea5510c9f78ff47e38f3aa8a5799874448abe6024ebfa4ad377aaf4d4308adb.jpg", + "3c50e2fd251325a273f9aad6bbcb382b1be674299b1fe55be6bb3e9b5ad04ca0.jpg", + "a194cf9d545f9965be86bdbc6f34b9b0b74546a04f5fa9760bda511f7a97fb7c.jpg", + "a762dcd6878ec33edc76632bd777de18f825106cba0e5d6c6d30d94b526a05ea.jpg", + "9d32e406190afab64254db9b2d59d64a5a63ae6e6c53d65b6ccc05e0736c67e5.jpg", + "0bbcd41d3c972bfd15f4b83a9a63ef342c178f8a38e7439048510527532f3cba.jpg", + "915d636c728c0d15119fca38d1296c6d240045df31691d9ab34debfecd6d3833.jpg", + "08bf6de26eb97d66cea3fc40a87a22cf9e970c874263b45a382c57820fe6ad4c.jpg", + "cf06c74c5dae4e08bcf98670dbe7d71f1e66574d35a1ea80244e3353f3a8a6b3.jpg", + "d7255857bba8c7cfb3f3f4104b284d2e0292ea536716eea74a3e4a247077f396.jpg", + "43059dae688b95425591cd22a7e6b00f3546c4cebfd6a3f8b7a23a007402265c.jpg", + "5be4869cedc73bf35a9a1d311880793405b1b929c2df1002bfadbfcef119769c.jpg", + "34f7be53ddb584ed7d3afa2f2c3cb68ce20148a5af9f2a39d6e05ac48c5bff64.jpg", + "afdf69fccc18b9659fe5a229511fe4dbc30622677e3fdf8bfe2276b0b02ed7b4.jpg", + "9947627548c00e8b82acc8f0dfc56c022cdee1b9192aae8d840e1c79b8e51424.jpg", + "294eb7357f5cfc1eaaa3a8cb2062b84b8db1e368931b67fd8e1148e77726883a.jpg", + "81cff9ddd2f835bf6ca3725229f508b8da0976b9d7a092a06bd93fdeff56ee5b.jpg", + "debfbbd9c90ceb3474c3bd19e4ecf7c57c958e90cd25b915ae085b366a8db24d.jpg", + "568d0b3deda19a47dceef35d4527d5a9d4e61cfa9782447841ce2cf8298c2f62.jpg", + "3b7556d1c5aff809b67ea957c85b304727f29cdaf62568e2713d9d493014aba3.jpg", + "a79bd60a069459b5c29bd2bfef44da077fea42dac02b97d1ff2d516afa24a10a.jpg", + "b5175e269a155ca86e50aab356a2a9bbbc3b5f5848bdb74edbfda52f8d5bdff9.jpg", + 
"7ef81b92d7602345b2fc2d1bde52741f453f82a0a08c3881281d9a928a8b53fa.jpg", + "3a17448b0f54cf4e6381cc934731a2734af31debe443e14a6c9dd68fbd2d2db4.jpg", + "1b19b139a447fecb2a4692d1541faf5d9465f1c9370b683ae9bfaec43247f39a.jpg", + "565e6463e2dba76377b13dfda546cb76b8bb2eb7d7228f7e2191ef4bf6f6a721.jpg", + "5badf0e3da37b5945de15a04a5ca1d05bcb9e25bf50aa52b00580604cdaaf326.jpg", + "bba9cf8d6b0b2afcc07d4e286b1897285bb69428a881b42815a1127c9a579053.jpg", + "1976f2c658e1672eaf54c466a3d5d66bcbacd1fed259e850dc597abda0d23143.jpg", + "8327b4776603bcd36cf76d60389b71413fe31cd838e243a63db791ab4f1c1851.jpg", + "1142850a7535b20cefdf36379474e1da06986e073c01ba9c6ab2e28ebf0bfba8.jpg", + "3777f4bab36ba99c71ea3ec71022ee30045f679fb4c088e9743c701606bd35f8.jpg", + "296476fd4148b07f6346417d921047a48e7c85656942d0e9902645924339a7c3.jpg", + "0859e28957c571d0775aab681b710d1eaa58a2de8effcde104093829108ab141.jpg", + "8226a0f776654e02c36fd0eba51f595f8ac7132984b36a1c2802a6aad6060dd3.jpg", + "873fafbf9bdd288aa3b20fc22fe7b38e896bc6874722e1ad5b9dadfd51f2452b.jpg", + "d6cf98d0907a555b3ac88db1e62fdd0a509d9738179daf307a9460e0993fbbbd.jpg", + ], + }, + "many": { + "landscape": [ + "05641ef30421cd6a3b325a8a33500b22c56ff7f2b664ac8eba169c48fe259dc2.jpg", + "c2042054e38cc20eaec09e19fb601e9b78a12d1a04d7575247316ddfbd392951.jpg", + "b61676aa9ebaf41f5c96a3b8304d84e29717ec897aff377dda1a6f8291d461a6.jpg", + "b6c4b00df3909712cf31797ae4158332939b35a2b5c5de1b8bcddead81649fe5.jpg", + "5b077b165933f80dd63000416a90d688310562f61ae9322fcdbd51962630c4f3.jpg", + "d05861f351375141cb83d12e330509f2bf12af3def333ee3537c07ba046cb370.jpg", + "dd1a75d46c555219a4f33dd4322b9a2c98e8ec4c9a2d46d4b96de41e8f210d81.jpg", + "ad74b11bb850ae0e4ce1fc258db0f4f206260103ac9103d75378240325019042.jpg", + "c281264ecac3352179406305110faf63d8f4ac8c4a3bfda17b7e5bf9a29a04ae.jpg", + "ff1478b21b0d9253fdc9a7513f8a48163ef6e8bdd74cd29b0d14efe0226c946f.jpg", + "089ad34db78dd4ba40a898fe5f182cc78e4baee54720127fca8b83b162a35fa7.jpg", + "1924e6677892c41b8bf5faa53a94932f415c523e3e8252a9c6e9e647eb532540.jpg", + "728a4d7728b1b954df70d34de20af6a4e0d626743fb1cb3a287e94a2a265c132.jpg", + "c5b2ff6d278ae1ba1bc73577ef5d104c96f24ce8f01cf2870459b15e39aa621f.jpg", + "f6d8fcdc4973dd7920672cbf0e2e6cb4353024d39af986cecd00a5923443cdec.jpg", + "71f98760c2e99777513784c2058d7287ec1c5d91bee9584fc261ac965cae10a2.jpg", + "58bbec94c6c08179576f57861dc1476b80133963df9b9ff0aa884073ee315d36.jpg", + "4729eb99bc925336c78391fffd6f873308117f546737accb528e9e3825865e97.jpg", + "3e824c591706940669b9e40d7aa2b987149c8eaa0650a31c984409d21278cdd3.jpg", + "51529aee4dcd95f70bdd79eb4e16544515905ba61a2e1e4fb6db74ea22bef969.jpg", + "3de238ddb2c8041688b49b23bf70e18507738caa68e7d106d120d6e45813f049.jpg", + "dd0653bcc4572efd0c12627cfe48c810c99a57622eb9fb1b52ec2299df813678.jpg", + "73cb10c4c09c1bf0d4723cb5a32aaade49f081cf9677db27f6b2bdeb64722b0c.jpg", + "b6b9661466e37e4d40d6548fc031983c9dbe83f5799d8fd49c5b199cd9cae468.jpg", + "25bcc021607aad141184ae75f0dbc4e78ccdeb1af04a0349d57f06c70e9c2154.jpg", + "156d472ccc9f2f0326cb2189e4e69e0434fce43696904c60ec42bbc688cfeab6.jpg", + "e8631010c2c0fc42614565245ec081d1b670d9444cfe6fbea89ba389873298bc.jpg", + "4ec00fe8545f42ce2e15b11e741d3d7103ab5c3a8d188a9bd177fb5706ae35c9.jpg", + "c746a655f50517d01fe02331a844613efe62e2f6b5e26ad208cdeae624a84cbb.jpg", + "d862bcaf3473945aaac288d007b7872d9e3c2514256ab8f9345d40c0b21a06ca.jpg", + "68c1e661c825bd4a0f891a6a02e20037faedea36b4c02466ee8364023761e882.jpg", + "da83313c9967f80ffa00a20a22906fdca020e5c6f5f27120c099c63ed70c217c.jpg", + 
"f464606e7151831b1aae45ddf8a2113b8dbd8b2ae3c1ac84fa7f7b060d9e3e55.jpg", + "4188eb3b8b9644d6f4f2b43f3ebc5f0fddef228b26fc299bbb88105f9b6ab1ce.jpg", + "06f66eda10a8625911366266f894bd80cec12a42dfdc858b3a1f120dbe7a72e6.jpg", + "bd9c8dd95733dff98d05a5f79aeb8e47d11cfc11f9f81976deaf5ec88cac93cb.jpg", + "8dc132ab7c3fb6ee1a9a58d1381349433e15bbff288f98fb4d93dd41c143067c.jpg", + "a0623764df54c930aaab12081af85bac527a7a4d63fb93297c7872f961ffcd79.jpg", + "e0f45fa9d5243be43662faa9273799a9ebc8bc83f36fe51e5096e42043b6b633.jpg", + "5f7c73ea5acf1fcbdfb4c37d1132ab54e4554b7d71ae04e38caf004d9eed8ae1.jpg", + "cc7563217d8f3b3afb35b6b6556fb7eede3efe935f07d714b65cdac72e9e811c.jpg", + "13b1cbfb97752b36ff4f0948dc2c82643f5404bc71269e0e32b89b10665b02b4.jpg", + "163f32cb04b95a761c66404124e86a68d3e5138e65c30fdd00b53e07d080b4f5.jpg", + "354bccf88468e0f06ea06c973ecc227a32bd2ccd3dd7f40e47b6a4524ab7ee8c.jpg", + "b93a172dfc2932c48c41d0a69217656c654dc8b20e40dd24f2da2ee69babb794.jpg", + "8696872ae4710d6057eed1847fef5a47ea3170134c536dfa8ff9827ae48d03fc.jpg", + "968229c57062378ef7a066c9d6b5216dfce6f68f7718897bbfe2f440802272dd.jpg", + "582f4ab218aeb6d372eb569dfd85e2402a03a5c57b6f7c551720f4d372f57791.jpg", + "9a042652a4522eb7062c0868f1fb3e986f27f32bcb4ec3d44f468a695b58d3db.jpg", + "19c6e813f39b865a0fab018c561f94ceff72cbbbe760d298b3837edc0977aff8.jpg", + "8a9705f8d278606e4de21a855d899d8388f459df065d9b1cfece1810a8c9c2ac.jpg", + "be61c3b17109d88cdb3a53301d1dafa248d313a27b751da14ddf40631442e6c6.jpg", + "d04315142f95a53d0aeb929cd8ef977dd871b12256e2ae654ebc99b7c356df5b.jpg", + "06ce78c9f3677a201ad456c0d90f2cc4a856547991d3a5588f4fd28ce9fee3bb.jpg", + "45fea6bad88dff18fde37b144ecc387b86fd26932d88cfcd4c65e8b586c97b86.jpg", + "87ea173c679785a160710fd1bf04ab9bf9b93dca01a4c479c86ea9c43e2d5f76.jpg", + "33c0e6456fe7a97615db555995a76d0b39de2346ea11298ec225ebb16381f42f.jpg", + "e0c305bbbe79a7d9553d41d0fc2bf6fdb99d559bfbea73dbf7ab482cdc054d35.jpg", + "d8e0f64d379dbeae6ebb16d128cf3a020f14d7392e07907e74d9c8bc3ee552af.jpg", + "c210d6d99e6d81a96d681349983e9f30e624acb49b240a938bab256eaf3b6e22.jpg", + "852e2df979af0ca1d1c01eced84db84aa73667323f6c3252029847fc4dd3da49.jpg", + "a256cbefe27a435c9be482ca6c1cfffdd5093c681fbc9abc518529690fff77ee.jpg", + "a34d74d2c481189eba7732826f372fdbaf83b01a3fc61069dd2ea5ca252ec208.jpg", + "8e366993f71e6c790782bfdc6242d57f60d5e067b11927c407f75923a293e0b1.jpg", + "b220d46d819d9f032833e1557a5e92cc4e035fcfdd2d9d779aca7ff48720b17c.jpg", + "97d8d3be8baaaefc27088770b3cdaec45b1b9d66a5b066fd8b17c14ad64fb595.jpg", + "01eb1ad0b6c4b91fefc45376d7d1b696809258e4a46b4cdbd736695359a7df2f.jpg", + "0c652983da87188842dc0f7bb1b96bc94263c8efdb52b5b33b214e235233b3f0.jpg", + "6c6b42d7252586af098e828db8a033515ff5a734078d2d37882fa2e1193c7e0a.jpg", + "c3dcd27ed77be77be101f7a57d83e069bba4a8622720122eb90d45d6a88ff8bd.jpg", + "b809c72bc4e20d979811a16f830e412418c0cd93e4fccb93f3316116f83f0cfe.jpg", + "632615c4222e05010474488761ab0c78c5215e6c9eb0f7ce3b2d92766d34248f.jpg", + "0b68c73f2375ccf8ca8a93012ad010a6d13052bdf390ff0e2a0a5e178cc89d59.jpg", + "1e9f3b322bc12784ef49e47481ec1ecd3d0f163c98692436e42ee80c615495f3.jpg", + "3a7ec5965c85d68c6e603dad8372001d3ab9adf974a8a44d5e286fe87d7b603b.jpg", + "f18334e241198b2ac25be1e80fffefae64f59cf936b9e9998521f77eb5c21df1.jpg", + "bd68d4ebbe38f4990c6089de669994948c1c41c504a9031bf53b2777482bb01c.jpg", + "d845ad2ef885883457f837b6858982e99125480b8eca56d6c87887e8ad1b2429.jpg", + "9232e5f090a9309f09ba7f41981d8e247ffd018f86166a62b4b5231ac54a97ca.jpg", + "3a73caa7212ea590aa6d9d018d1507ff6dacc0c564ea461d15752d860b9524b9.jpg", + 
"6f8967985eb5d9fb4add6b942b5f9047a7c89bd0be0877a345afcffe2c8b160c.jpg", + "3e14e6687fb4ad24ee763bb9c85753f394a99d7eefb1b01ab7a4943d3f9d8d41.jpg", + "3b21135248a95cbf4e011f214fb7552d0d5d30dabbe0e7df6e8b1ad22a0edc51.jpg", + "c89482acf334da6b1bbd4700eee7846f21c7a99c05fe3585fd19aed4251d8932.jpg", + "a3885de0e4c1a2768c06ad48bdbca21b4c1642ff2ed2d3624e31f108eb7dc612.jpg", + "a4c00ea9d3e3fefeed2e8d3cb3c3620f4ce95788ce2b297161b27bd66312b183.jpg", + "4340dccfe5d6cc0c58257bb562984b0aea2001021b2bd2cdd8b1636330a19560.jpg", + "7420054f462c6b1ca34205c90c031c631c618e8b56efec6e77179db99395073e.jpg", + "12cfa907007cba10d71ef01a7003fb14b8759da0fb05d51e5d6c695d1d88fe6d.jpg", + "bf1eb9ee9c52995d3c722e4bd8d6c82a80aa493404ce2f38725ba61e9a50b494.jpg", + ], + "portrait": [ + "276da8f458841b376269f6537d703aecb5848158ccf5b84671c0a62a3d7a30bf.jpg", + "bab121cf00d898d783c998c2037d15ca8e52fa2e8d5d1a4e9dfdaf1b8c0bffab.jpg", + "00c83ef50e52aaf3744e11a9a7531d5adcf8730a2be5eab343e9d02131e460b2.jpg", + "a7138efeca57d652816f92583359d5058840b2525f19b7809eb20386d115341f.jpg", + "d112bb812787ed46408a30bdc153e9646901223f077f893820b789ac4dc79475.jpg", + "3ad93eaaeb5a824525cc4aa3234cf86e7a566a21b4cc605a1b1c7342f68bdede.jpg", + "bc0cccfc19ce432bc758cfba2181a84ef313dacb8fb9464e0c4daecea9c73295.jpg", + "0c54668d3354bbd8b340813984c579b0fae5659dc1210a2f5bb6b6af39602ebb.jpg", + "8f7cdf7684a6b846e9389bee5cb820eec0e69b02c05d89cf8a4c930c2f70ef26.jpg", + "c95e641fa2ba49458d8204cc3df0fa6ee9734896265acddc5688c0ac074ef713.jpg", + "886bb33fa2e797473becf227971c9274c8d9b70bf398da3e7e908ec4a81df41f.jpg", + "2a49c3f36c077439838a07a35ebf8a26dcb57b563ad9b78032e665b72ccfac38.jpg", + "05dd446c74f7dd04f805144ce163ed40c37e615fd0851837fce6b965a6f83ace.jpg", + "35a24c0c4e080efa77a7000d1bd5dbf3e7ab397d49c11e9f1302269588b0c9ec.jpg", + "6f9a292bbe13276dfdfeecf608c763ca6a58ac7b0cc93038e9bf0ee1149cc63c.jpg", + "41d0accf0e06703499d690a13a227a45eda8a69e7cbc0fb3e390716e6cd62276.jpg", + "23d137f0d638f749e8f4d79741c1e6fee155379626e788f970f1cd48a3f4e9d2.jpg", + "1514de8dc90e3dff7270699e1c873f314491b17f3bad1581b1422438e4c473b3.jpg", + "6f7f4a1a3e7582d19c49d31e4a80aaef3aa7c57629cbbc7de098a2ece70f8001.jpg", + "1e1509d548f8e6bbc0ba0a52af856fd5adc5e71805e31c1420914a3538ac8c66.jpg", + "8fa1358c6ed1fa70f67f2c2a75032d431bb948f2c2badd7d7bf5100106946ed1.jpg", + "d554a32a0d481a6b8229239378583596bc7b648dc080085ef30c24ada95a7842.jpg", + "d14d9b2869cfb891dd3863e7303094c599449c834b9946af4e2d7e6cdd634716.jpg", + "c714150314ca54fbcb077f465c0aca77e34cfcb8353c1511fab81a626548aba0.jpg", + "4deb6d90c4ba0060591991e23d0250b05d1dd89dbf4fa377add1c53f9c03d4eb.jpg", + "198cd903c801fd36e84a6f17cbb3721a9350e7c508d4850e4d1ac66042edd761.jpg", + "9d56a26c49ec07e1bfe0d561be3242b81b1b46099ec48abe226efbaf3213eaa4.jpg", + "9397b3ca92defb29cfe575ee52e7f27301a95d8f8fe3e5289401866ddc7e8e33.jpg", + "50d7ae5df425bbc30c9f4cad711c05e444ac692961f298caae063333ecbcceca.jpg", + "5b438777bf0684d3b9ffd9b369b0dca7b7c71a0ac985f161158146fec9b1eee0.jpg", + "06e08abe7113a0dac56a9ff4c4a9f01271bb34f68bd0663689652c5026db5045.jpg", + "3db4a5246b3b4667de126b689763720bd48866f3ab81fc0fc233dea5759a2c39.jpg", + "6f3d0a1b111f850650efc9798de985791a10e339c780cd4bc8b71ab6ce2145ba.jpg", + "e4a15e6b861ececfbe19cfac7d772f6260a0452de15a980ecd93baebfc7ebd25.jpg", + "436f5a8722ed0166c22a6463902d4a533b67cddddecade1c336a90d87be23793.jpg", + "5d7e6ccbef63a704ef144ee495a04b802c03e5168aad20003596385ebddd063c.jpg", + ], + }, +} diff --git a/app/creation_interfaces/kojii_huemin.py b/app/creation_interfaces/kojii_huemin.py index 190fe5a..924c9eb 
100644 --- a/app/creation_interfaces/kojii_huemin.py +++ b/app/creation_interfaces/kojii_huemin.py @@ -6,7 +6,6 @@ from ..plugins import replicate - class Climate(Enum): arid = "arid" temperate = "temperate" @@ -57,6 +56,7 @@ class Landform(Enum): waterfalls = "waterfalls" rift_valleys = "rift valleys" + class BodyOfWater(Enum): oceans = "oceans" seas = "seas" @@ -79,6 +79,7 @@ class BodyOfWater(Enum): springs = "springs" brooks = "brooks" + class Structure(Enum): bridges = "bridges" tunnels = "tunnels" @@ -97,6 +98,7 @@ class Structure(Enum): piers = "piers" harbors = "harbors" + class Season(Enum): spring = "spring" summer = "summer" @@ -122,6 +124,7 @@ class Season(Enum): heatwave = "heatwave" drought = "drought" + class TimeOfDay(Enum): dawn = "dawn" morning = "morning" @@ -131,6 +134,7 @@ class TimeOfDay(Enum): evening = "evening" sunset = "sunset" + class Color(Enum): monochromatic = "monochromatic" analogous = "analogous" @@ -154,6 +158,7 @@ class KojiiHueminRequest(BaseModel): """ A request for Huemin endpoint """ + climate: Climate landform: Landform body_of_water: BodyOfWater @@ -161,17 +166,25 @@ class KojiiHueminRequest(BaseModel): # season: Season # time_of_day: TimeOfDay # color: Color - + def generate_prompt(selected_climate, selected_landform, selected_body_of_water): base_prompt = "isometric generative landscape orthographic abstract aj casson perlin noise 3d shaders areal embroidery minimalism claude monet oil painting pastel" - - selected_structure = random.choice(list(Structure)).value - selected_season = random.choice(list(Season)).value - selected_time_of_day = random.choice(list(TimeOfDay)).value - selected_colors = random.choice(list(Color)).value - selected_keywords = [selected_climate.value, selected_landform.value, selected_body_of_water.value, selected_structure, selected_season, selected_time_of_day, selected_colors] + selected_structure = random.choice(list(Structure)).value + selected_season = random.choice(list(Season)).value + selected_time_of_day = random.choice(list(TimeOfDay)).value + selected_colors = random.choice(list(Color)).value + + selected_keywords = [ + selected_climate.value, + selected_landform.value, + selected_body_of_water.value, + selected_structure, + selected_season, + selected_time_of_day, + selected_colors, + ] landscape_keywords = " ".join(selected_keywords) prompt = base_prompt + " (((" + landscape_keywords + ")))" @@ -183,10 +196,7 @@ def kojii_huemin(request: KojiiHueminRequest, callback=None): print(request) prompt = generate_prompt(request.climate, request.landform, request.body_of_water) print(prompt) - config = { - "mode": "kojii/huemin", - "text_input": prompt - } + config = {"mode": "kojii/huemin", "text_input": prompt} image_url, thumbnail_url = replicate.sdxl(config) diff --git a/app/creation_interfaces/kojii_makeitrad.py b/app/creation_interfaces/kojii_makeitrad.py index c3b07dd..36049f8 100644 --- a/app/creation_interfaces/kojii_makeitrad.py +++ b/app/creation_interfaces/kojii_makeitrad.py @@ -10,6 +10,7 @@ class Setting(Enum): inside = "inside" outside = "outside" + class Location(Enum): jungle = "jungle" cliff_front = "cliff front" @@ -19,12 +20,14 @@ class Location(Enum): montana_mountains = "montana mountains" green_hills = "green hills" + class Time(Enum): noon = "noon" dawn = "dawn" red_sunset = "red sunset" night = "night" + class Color(Enum): default = "default" orange = "orange" @@ -32,15 +35,18 @@ class Color(Enum): light_blue = "light blue" light_pink = "light pink" + class AspectRatio(Enum): portrait = 
"portrait" landscape = "landscape" square = "square" + class KojiiMakeitradRequest(BaseModel): """ A request for Makeitrad endpoint """ + setting: Setting location: Location time: Time @@ -50,10 +56,7 @@ class KojiiMakeitradRequest(BaseModel): aspect_ratio: AspectRatio -settings = { - "inside": "interior", - "outside": "exterior" -} +settings = {"inside": "interior", "outside": "exterior"} locations = { "jungle": "surrounded by overgrown plants, in the lush jungle, large leaves", @@ -62,14 +65,14 @@ class KojiiMakeitradRequest(BaseModel): "redwood forest": "in the lush redwood forest with a running river", "city suburbia": "urban city suburbia, (house plants) and (outdoor topiaries:1.5)", "montana mountains": "dramatic winter snow capped rustic Montana mountains, trees", - "green hills": "rolling green grass hills and colorful wild flowers" + "green hills": "rolling green grass hills and colorful wild flowers", } times = { "noon": "high noon", "dawn": "dawn light with hazy fog", "red sunset": "night red sunset", - "night": "dark black night with large moon and stars" + "night": "dark black night with large moon and stars", } colors = { @@ -77,15 +80,16 @@ class KojiiMakeitradRequest(BaseModel): "orange": "orange accents", "yellow/green": "yellow and green accents", "light blue": "light blue accents", - "light pink": "light pink accents" + "light pink": "light pink accents", } resolutions = { "portrait": (768, 1344), "landscape": (1344, 768), - "square": (1024, 1024) + "square": (1024, 1024), } + def kojii_makeitrad(request: KojiiMakeitradRequest, callback=None): setting = settings[request.setting.value] location = locations[request.location.value] @@ -115,14 +119,10 @@ def kojii_makeitrad(request: KojiiMakeitradRequest, callback=None): "seed": random.randint(0, 1000000), } - output = replicate.run_task( - config, - model_name="abraham-ai/eden-comfyui" - ) - + output = replicate.run_task(config, model_name="abraham-ai/eden-comfyui") + output = list(output) image_url = output[0]["files"][0] thumbnail_url = output[0]["thumbnails"][0] return image_url, thumbnail_url - \ No newline at end of file diff --git a/app/creation_interfaces/kojii_untitledxyz.py b/app/creation_interfaces/kojii_untitledxyz.py index c1b9a93..7ff705a 100644 --- a/app/creation_interfaces/kojii_untitledxyz.py +++ b/app/creation_interfaces/kojii_untitledxyz.py @@ -10,15 +10,23 @@ class Type(Enum): column = "column" context = "context" + class KojiiUntitledxyzRequest(BaseModel): """ A request for Untitledxyz endpoint """ + type: Type = Field(default=Type.column, description="Column or Context") - human_machine_nature: float = Field(default=0.5, description="Human (0) vs machine (0.5) vs nature (1)", ge=0.0, le=1.0) + human_machine_nature: float = Field( + default=0.5, + description="Human (0) vs machine (0.5) vs nature (1)", + ge=0.0, + le=1.0, + ) + def kojii_untitledxyz(request: KojiiUntitledxyzRequest, callback=None): - + print("====== UNTITLED =======") print(request) @@ -27,21 +35,21 @@ def kojii_untitledxyz(request: KojiiUntitledxyzRequest, callback=None): if request.human_machine_nature < 0.5: text_inputs_to_interpolate = [ "close up of a single column, highly detailed, pen and ink, stone drawn with light yellow, orange, light brown, solid white background, sharpness, noise.", - "close up of a single column fragment, dense wires and thick electrical cables, computer circuits, corrosion, pen and ink, wires drawn with pale yellow, red, blue, green, solid white background, sharpness, noise." 
+ "close up of a single column fragment, dense wires and thick electrical cables, computer circuits, corrosion, pen and ink, wires drawn with pale yellow, red, blue, green, solid white background, sharpness, noise.", ] text_inputs_to_interpolate_weights = [ 2 * request.human_machine_nature, - 2 * (1 - request.human_machine_nature) + 2 * (1 - request.human_machine_nature), ] else: text_inputs_to_interpolate = [ "close up of a single column fragment, dense wires and thick electrical cables, computer circuits, corrosion, pen and ink, wires drawn with pale yellow, red, blue, green, solid white background, sharpness, noise.", - "close up of a single column fragment, pen and ink, dense vegetation, wrapped in vines emerging from cracks, large leaves, dense lichen, diverse plants drawn with bright green, red, orange, blue, cyan, magenta, yellow, oversaturated, neons, solid white background, sharpness, noise." + "close up of a single column fragment, pen and ink, dense vegetation, wrapped in vines emerging from cracks, large leaves, dense lichen, diverse plants drawn with bright green, red, orange, blue, cyan, magenta, yellow, oversaturated, neons, solid white background, sharpness, noise.", ] text_inputs_to_interpolate_weights = [ 2 * (request.human_machine_nature - 0.5), - 2 * (1 - (request.human_machine_nature - 0.5)) + 2 * (1 - (request.human_machine_nature - 0.5)), ] elif request.type == Type.context: @@ -49,28 +57,30 @@ def kojii_untitledxyz(request: KojiiUntitledxyzRequest, callback=None): if request.human_machine_nature < 0.5: text_inputs_to_interpolate = [ "isometric architectural drawing, displaying an ultra close up of distorted roman columns connected to a modern building, emphasizing stone corinthian capitals and white blocks, pen and ink, yellow, orange, light brown, solid white background, sharpness, noise", - "an isometric architectural drawing, displaying an ultra close up of a modernist building made of computer parts, dodecahedrons, textural details, emphasizing entangled wires with intense precision, the intricate web of wires are seen up close, accentuating the fusion of modern and ancient, the image depicts wires illustrated with vibrant colors, sharpness, noise." + "an isometric architectural drawing, displaying an ultra close up of a modernist building made of computer parts, dodecahedrons, textural details, emphasizing entangled wires with intense precision, the intricate web of wires are seen up close, accentuating the fusion of modern and ancient, the image depicts wires illustrated with vibrant colors, sharpness, noise.", ] text_inputs_to_interpolate_weights = [ 2 * request.human_machine_nature, - 2 * (1 - request.human_machine_nature) + 2 * (1 - request.human_machine_nature), ] else: text_inputs_to_interpolate = [ "an isometric architectural drawing, displaying an ultra close up of a modernist building made of computer parts, dodecahedrons, textural details, emphasizing entangled wires with intense precision, the intricate web of wires are seen up close, accentuating the fusion of modern and ancient, the image depicts wires illustrated with vibrant colors, sharpness, noise.", - "an isometric architectural drawing, displaying an ultra close up of a modern superstructure, geometric stone blocks, emphasis on dense overwhelming vines with intense precision, plants are shot up close, accentuating the fusion of nature and columns, the image depicts giant leaves illustrated with vibrant colors, solid white background, sharpness, noise." 
+ "an isometric architectural drawing, displaying an ultra close up of a modern superstructure, geometric stone blocks, emphasis on dense overwhelming vines with intense precision, plants are shot up close, accentuating the fusion of nature and columns, the image depicts giant leaves illustrated with vibrant colors, solid white background, sharpness, noise.", ] text_inputs_to_interpolate_weights = [ 2 * (request.human_machine_nature - 0.5), - 2 * (1 - (request.human_machine_nature - 0.5)) + 2 * (1 - (request.human_machine_nature - 0.5)), ] - + config = { "mode": "create", "text_input": " to ".join(text_inputs_to_interpolate), "text_inputs_to_interpolate": "|".join(text_inputs_to_interpolate), - "text_inputs_to_interpolate_weights": "|".join([str(t) for t in text_inputs_to_interpolate_weights]), + "text_inputs_to_interpolate_weights": "|".join( + [str(t) for t in text_inputs_to_interpolate_weights], + ), "lora": "https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/d2e6d1f8ccfca428ba42fa56a0384a4261d32bf1ee8b0dc952d99da9011daf39.tar", "lora_scale": 0.8, } @@ -78,7 +88,7 @@ def kojii_untitledxyz(request: KojiiUntitledxyzRequest, callback=None): print("CONFIG") print(config) print("=======") - + image_url, thumbnail_url = replicate.sdxl(config) - - return image_url, thumbnail_url \ No newline at end of file + + return image_url, thumbnail_url diff --git a/app/creation_interfaces/kojii_violetforest.py b/app/creation_interfaces/kojii_violetforest.py index 16646dd..b760684 100644 --- a/app/creation_interfaces/kojii_violetforest.py +++ b/app/creation_interfaces/kojii_violetforest.py @@ -12,11 +12,18 @@ class Style(Enum): Lace = "Lace" Flowers = "Flowers" + class KojiiVioletforestRequest(BaseModel): """ A request for VioletForest endpoint """ - cybertwee_cyberpunk: float = Field(default=0.5, description="Cybertwee vs Cyberpunk", ge=0.0, le=1.0) + + cybertwee_cyberpunk: float = Field( + default=0.5, + description="Cybertwee vs Cyberpunk", + ge=0.0, + le=1.0, + ) style: Style = Field(default=Style.Kawaii, description="Style") @@ -29,21 +36,23 @@ def kojii_violetforest(request: KojiiVioletforestRequest, callback=None): modifiers = "lace, lace, lace, lace" elif request.style == Style.Flowers: modifiers = "flowers, flowers, flowers, flowers" - + text_inputs_to_interpolate = [ f"a stunning image of a cute cybertwee girl, {modifiers}", - f"a stunning image of an Aston Martin sportscar, {modifiers}" + f"a stunning image of an Aston Martin sportscar, {modifiers}", ] text_inputs_to_interpolate_weights = [ - request.cybertwee_cyberpunk, - 1 - request.cybertwee_cyberpunk + request.cybertwee_cyberpunk, + 1 - request.cybertwee_cyberpunk, ] config = { "mode": "create", "text_input": " to ".join(text_inputs_to_interpolate), "text_inputs_to_interpolate": "|".join(text_inputs_to_interpolate), - "text_inputs_to_interpolate_weights": " | ".join([str(t) for t in text_inputs_to_interpolate_weights]), + "text_inputs_to_interpolate_weights": " | ".join( + [str(t) for t in text_inputs_to_interpolate_weights], + ), "lora": "https://edenartlab-prod-data.s3.us-east-1.amazonaws.com/e3b036c0a9949de0a5433cb6c7e54b540c47535ce7ae252948177304542ca4da.tar", "lora_scale": 0.7, } @@ -53,4 +62,4 @@ def kojii_violetforest(request: KojiiVioletforestRequest, callback=None): image_url, thumbnail_url = replicate.sdxl(config) - return image_url, thumbnail_url \ No newline at end of file + return image_url, thumbnail_url diff --git a/app/generator.py b/app/generator.py index 99668a1..faaba27 100644 --- a/app/generator.py +++ 
b/app/generator.py @@ -5,30 +5,49 @@ from .models import ( MonologueRequest, - DialogueRequest, DialogueResult, StoryRequest, ReelRequest, - TaskRequest, TaskUpdate, TaskResult, LittleMartianRequest, + DialogueRequest, + DialogueResult, + StoryRequest, + ReelRequest, + TaskRequest, + TaskUpdate, + TaskResult, + LittleMartianRequest, ) from .animations import ( - animated_monologue, - animated_dialogue, + animated_monologue, + animated_dialogue, animated_story, - animated_reel, + animated_reel, illustrated_comic, - little_martian_poster + little_martian_poster, ) from .creation_interfaces import ( - kojii_makeitrad, KojiiMakeitradRequest, - kojii_chebel, KojiiChebelRequest, - kojii_untitledxyz, KojiiUntitledxyzRequest, - kojii_violetforest, KojiiVioletforestRequest, - kojii_huemin, KojiiHueminRequest + kojii_makeitrad, + KojiiMakeitradRequest, + kojii_chebel, + KojiiChebelRequest, + kojii_untitledxyz, + KojiiUntitledxyzRequest, + kojii_violetforest, + KojiiVioletforestRequest, + kojii_huemin, + KojiiHueminRequest, ) NARRATOR_CHARACTER_ID = os.getenv("NARRATOR_CHARACTER_ID") logosGenerators = [ - "monologue", "dialogue", "story", "reel", - "kojii/makeitrad", "kojii/va2rosa", "kojii/chebel", "kojii/untitledxyz", "kojii/violetforest", "kojii/huemin" + "monologue", + "dialogue", + "story", + "reel", + "kojii/makeitrad", + "kojii/va2rosa", + "kojii/chebel", + "kojii/untitledxyz", + "kojii/violetforest", + "kojii/huemin", ] @@ -47,7 +66,6 @@ def send_progress_update(progress: float): error=None, ) requests.post(webhook_url, json=update.dict()) - if webhook_url: update = TaskUpdate( @@ -72,7 +90,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = animated_monologue( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "dialogue": @@ -86,11 +104,11 @@ def send_progress_update(progress: float): prompt=prompt, gfpgan=gfpgan, dual_view=dual_view, - intro_screen=intro_screen + intro_screen=intro_screen, ) output_url, thumbnail_url = animated_dialogue( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "story": @@ -109,7 +127,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = animated_story( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "reel": @@ -130,12 +148,9 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = animated_reel( task_req, - callback=send_progress_update + callback=send_progress_update, ) - - - elif task_type == "kojii/makeitrad": setting = request.config.get("setting") location = request.config.get("location") @@ -144,7 +159,9 @@ def send_progress_update(progress: float): clouds = request.config.get("clouds") pool = request.config.get("pool") aspect_ratio = request.config.get("aspect_ratio") - prompt = f"{setting} {location} {time} {color} {clouds} {pool} {aspect_ratio}" + prompt = ( + f"{setting} {location} {time} {color} {clouds} {pool} {aspect_ratio}" + ) task_req = KojiiMakeitradRequest( setting=setting, location=location, @@ -156,7 +173,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = kojii_makeitrad( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "kojii/va2rosa": @@ -174,7 +191,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = little_martian_poster( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "kojii/chebel": @@ -191,7 
+208,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = kojii_chebel( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "kojii/untitledxyz": @@ -204,7 +221,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = kojii_untitledxyz( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "kojii/violetforest": @@ -217,7 +234,7 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = kojii_violetforest( task_req, - callback=send_progress_update + callback=send_progress_update, ) elif task_type == "kojii/huemin": @@ -232,10 +249,9 @@ def send_progress_update(progress: float): ) output_url, thumbnail_url = kojii_huemin( task_req, - callback=send_progress_update + callback=send_progress_update, ) - output = TaskResult( files=[output_url], thumbnails=[thumbnail_url], diff --git a/app/livecoder.py b/app/livecoder.py index 24bcccd..15f960b 100644 --- a/app/livecoder.py +++ b/app/livecoder.py @@ -8,16 +8,18 @@ livecoder_prompt_template, ) + class LiveCoder: - def __init__( - self - ): + def __init__(self): self.livecoder_params = {"temperature": 0.7, "max_tokens": 500} self.livecoder_system = livecoder_system.template - - self.livecoder = LLM(system_message=self.livecoder_system, params=self.livecoder_params) - self.orbits = ["d1", "d2", "d3"] #, "d4"] + self.livecoder = LLM( + system_message=self.livecoder_system, + params=self.livecoder_params, + ) + + self.orbits = ["d1", "d2", "d3"] # , "d4"] self.orbit_idx = 0 def __call__( @@ -43,7 +45,7 @@ def __call__( print("input_prompt:", input_prompt) output = self.livecoder( - input_prompt, + input_prompt, id=session_id, output_schema=LiveCodeResult, model="gpt-4-1106-preview", diff --git a/app/llm/llm.py b/app/llm/llm.py index 1252d5e..f4a231e 100644 --- a/app/llm/llm.py +++ b/app/llm/llm.py @@ -44,14 +44,19 @@ def __init__( new_default_session = None if default_session: new_session = self.new_session( - return_session=True, system=system_message, id=id, **kwargs + return_session=True, + system=system_message, + id=id, + **kwargs, ) new_default_session = new_session sessions = {new_session.id: new_session} super().__init__( - client=client, default_session=new_default_session, sessions=sessions + client=client, + default_session=new_default_session, + sessions=sessions, ) if not system_message and console: @@ -63,12 +68,12 @@ def update( system_message: str = None, **kwargs, ) -> None: - + for sess in self.sessions.values(): sess.system = system_message for arg in kwargs: setattr(sess, arg, kwargs[arg]) - + if self.default_session: self.default_session.system = system_message for arg in kwargs: @@ -135,10 +140,7 @@ def add_messages( sess = self.get_session(id) sess.add_messages(user_message, assistant_message, True) - def get_messages( - self, - id: Union[str, UUID] = None - ) -> List[ChatMessage]: + def get_messages(self, id: Union[str, UUID] = None) -> List[ChatMessage]: sess = self.get_session(id) return sess.messages @@ -153,7 +155,7 @@ def __call__( tools: List[Any] = None, input_schema: Any = None, output_schema: Any = None, - model: str = "gpt-4-1106-preview" + model: str = "gpt-4-1106-preview", ) -> str: sess = self.get_session(id) if tools: @@ -281,21 +283,22 @@ def save_session( # for human-readability, the timezone is set to local machine local_datetime = message["received_at"].astimezone() message["received_at"] = local_datetime.strftime( - "%Y-%m-%d %H:%M:%S" + "%Y-%m-%d %H:%M:%S", ) 
w.writerow(message) elif format == "json": with open(output_path, "wb") as f: f.write( orjson.dumps( - sess_dict, option=orjson.OPT_INDENT_2 if not minify else None - ) + sess_dict, + option=orjson.OPT_INDENT_2 if not minify else None, + ), ) def load_session(self, input_path: str, id: Union[str, UUID] = uuid4(), **kwargs): assert input_path.endswith(".csv") or input_path.endswith( - ".json" + ".json", ), "Only CSV and JSON imports are accepted." if input_path.endswith(".csv"): @@ -305,10 +308,11 @@ def load_session(self, input_path: str, id: Union[str, UUID] = uuid4(), **kwargs for row in r: # need to convert the datetime back to UTC local_datetime = datetime.datetime.strptime( - row["received_at"], "%Y-%m-%d %H:%M:%S" + row["received_at"], + "%Y-%m-%d %H:%M:%S", ).replace(tzinfo=dateutil.tz.tzlocal()) row["received_at"] = local_datetime.astimezone( - datetime.timezone.utc + datetime.timezone.utc, ) # https://stackoverflow.com/a/68305271 row = {k: (None if v == "" else v) for k, v in row.items()} diff --git a/app/llm/session.py b/app/llm/session.py index 76f1476..ebff514 100644 --- a/app/llm/session.py +++ b/app/llm/session.py @@ -24,7 +24,7 @@ "teknium/openhermes-2-mistral-7b", "pygmalionai/mythalion-13b", "anthropic/claude-2", - "cognitivecomputations/dolphin-mixtral-8x7b" + "cognitivecomputations/dolphin-mixtral-8x7b", ] OPENAI_API_URL: HttpUrl = "https://api.openai.com/v1/chat/completions" @@ -75,7 +75,9 @@ def __str__(self) -> str: - Last message sent at {last_message_str}""" def format_input_messages( - self, system_message: ChatMessage, user_message: ChatMessage + self, + system_message: ChatMessage, + user_message: ChatMessage, ) -> list: recent_messages = ( self.messages[-self.recent_messages :] @@ -83,26 +85,22 @@ def format_input_messages( else self.messages ) # Todo: include images in previous messages - messages = ( - [system_message.model_dump(include=self.input_fields, exclude_none=True)] - + [ - m.model_dump(include=self.input_fields, exclude_none=True) - for m in recent_messages - ] - ) + messages = [ + system_message.model_dump(include=self.input_fields, exclude_none=True), + ] + [ + m.model_dump(include=self.input_fields, exclude_none=True) + for m in recent_messages + ] if user_message: - new_message = user_message.model_dump(include=self.input_fields, exclude_none=True) + new_message = user_message.model_dump( + include=self.input_fields, + exclude_none=True, + ) if user_message.image: img_data_url = url_to_image_data(user_message.image) new_message["content"] = [ - { - "type": "text", - "text": user_message.content - }, - { - "type": "image_url", - "image_url": img_data_url - } + {"type": "text", "text": user_message.content}, + {"type": "image_url", "image_url": img_data_url}, ] messages += [new_message] return messages @@ -138,13 +136,13 @@ def prepare_request( output_schema: Any = None, is_function_calling_required: bool = True, ): - headers = { - "Content-Type": "application/json" - } + headers = {"Content-Type": "application/json"} if model not in ALLOWED_MODELS: - raise ValueError(f"Invalid model: {model}. Available models: {ALLOWED_MODELS}") - + raise ValueError( + f"Invalid model: {model}. 
Available models: {ALLOWED_MODELS}", + ) + if image: model = "gpt-4-vision-preview" @@ -152,12 +150,16 @@ def prepare_request( if provider == "openai": api_url = OPENAI_API_URL - headers["Authorization"] = f"Bearer {self.auth['openai_api_key'].get_secret_value()}" + headers[ + "Authorization" + ] = f"Bearer {self.auth['openai_api_key'].get_secret_value()}" elif provider == "openrouter": api_url = OPENROUTER_API_URL headers["HTTP-Referer"] = "https://eden.art" headers["X-Title"] = "Eden.art" - headers["Authorization"] = f"Bearer {self.auth['openrouter_api_key'].get_secret_value()}" + headers[ + "Authorization" + ] = f"Bearer {self.auth['openrouter_api_key'].get_secret_value()}" else: raise ValueError(f"Unknown provider: {provider}") @@ -169,7 +171,8 @@ def prepare_request( user_message = ChatMessage(role="user", content=prompt, image=image) else: assert isinstance( - prompt, input_schema + prompt, + input_schema, ), f"prompt must be an instance of {input_schema.__name__}" user_message = ChatMessage( role="function", @@ -186,8 +189,8 @@ def prepare_request( **gen_params, } - #print("------------------------------------------") - #print(orjson.dumps(data, option=orjson.OPT_INDENT_2).decode()) + # print("------------------------------------------") + # print(orjson.dumps(data, option=orjson.OPT_INDENT_2).decode()) # Add function calling parameters if a schema is provided if input_schema or output_schema: @@ -198,7 +201,7 @@ def prepare_request( if output_schema: output_function = self.schema_to_function(output_schema) functions.append( - output_function + output_function, ) if output_function not in functions else None if is_function_calling_required: data["function_call"] = {"name": output_schema.__name__} @@ -232,7 +235,7 @@ def gen( input_schema: Any = None, output_schema: Any = None, ): - #zmodel = "gpt-3.5-turbo" + # zmodel = "gpt-3.5-turbo" finished = False tries = 0 @@ -240,7 +243,14 @@ def gen( while not finished: api_url, headers, data, user_message = self.prepare_request( - model, prompt, image, system, params, False, input_schema, output_schema + model, + prompt, + image, + system, + params, + False, + input_schema, + output_schema, ) resp = client.post( @@ -252,12 +262,12 @@ def gen( resp = resp.json() if resp.get("error"): - error = resp.get("error").get('code', '') - if error == 'context_length_exceeded': + error = resp.get("error").get("code", "") + if error == "context_length_exceeded": print(resp.get("error")) self.messages = self.messages[2:] - elif error == 'rate_limit_exceeded': - time.sleep(5 * (2 ** tries)) # exp backoff + elif error == "rate_limit_exceeded": + time.sleep(5 * (2**tries)) # exp backoff else: finished = True @@ -286,7 +296,7 @@ def gen( # self.total_length += resp["usage"]["total_tokens"] except KeyError: raise KeyError(f"No AI generation: {resp}") - + return content def stream( @@ -301,7 +311,13 @@ def stream( input_schema: Any = None, ): api_url, headers, data, user_message = self.prepare_request( - model, prompt, image, system, params, True, input_schema + model, + prompt, + image, + system, + params, + True, + input_schema, ) with client.stream( @@ -362,7 +378,7 @@ def gen_with_tools( "max_tokens": 1, "logit_bias": logit_bias, }, - ) + ), ) # if no tool is selected, do a standard generation instead. 
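The `gen` hunk above retries the chat-completion request in a loop: on `context_length_exceeded` it drops the two oldest stored messages, and on `rate_limit_exceeded` it sleeps `5 * (2**tries)` seconds before trying again. A minimal standalone sketch of that backoff pattern is below; `post_with_backoff`, `send_request`, and the parameter defaults are hypothetical illustrations for readers, not part of this patch.

import time


def post_with_backoff(send_request, max_tries=6, base_delay=5):
    # send_request: hypothetical zero-argument callable returning a parsed
    # JSON dict shaped like the chat-completion responses handled above.
    for attempt in range(max_tries):
        resp = send_request()
        error = (resp.get("error") or {}).get("code", "")
        if error == "rate_limit_exceeded":
            # same growth as the session code above: 5, 10, 20, ... seconds
            time.sleep(base_delay * (2 ** attempt))
            continue
        return resp  # success, or a non-retryable error left to the caller
    raise RuntimeError("gave up after repeated rate-limit errors")
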
@@ -400,7 +416,8 @@ def gen_with_tools( # manually append the nonmodified user message + normal AI response user_message = ChatMessage(role="user", content=prompt, image=image) assistant_message = ChatMessage( - role="assistant", content=context_dict["response"] + role="assistant", + content=context_dict["response"], ) self.add_messages(user_message, assistant_message, save_messages) @@ -419,7 +436,14 @@ async def gen_async( output_schema: Any = None, ): api_url, headers, data, user_message = self.prepare_request( - model, prompt, image, system, params, False, input_schema, output_schema + model, + prompt, + image, + system, + params, + False, + input_schema, + output_schema, ) r = await client.post( @@ -466,7 +490,13 @@ async def stream_async( input_schema: Any = None, ): api_url, headers, data, user_message = self.prepare_request( - model, prompt, image, system, params, True, input_schema + model, + prompt, + image, + system, + params, + True, + input_schema, ) async with client.stream( @@ -525,7 +555,7 @@ async def gen_with_tools_async( "max_tokens": 1, "logit_bias": logit_bias, }, - ) + ), ) # if no tool is selected, do a standard generation instead. @@ -563,7 +593,8 @@ async def gen_with_tools_async( # manually append the nonmodified user message + normal AI response user_message = ChatMessage(role="user", content=prompt, image=image) assistant_message = ChatMessage( - role="assistant", content=context_dict["response"] + role="assistant", + content=context_dict["response"], ) self.add_messages(user_message, assistant_message, save_messages) diff --git a/app/models/characters.py b/app/models/characters.py index c2eb044..5b9ecca 100644 --- a/app/models/characters.py +++ b/app/models/characters.py @@ -60,5 +60,5 @@ def __str__(self) -> str: self.model_dump( exclude_none=True, # option=orjson.OPT_INDENT_2 - ) + ), ) diff --git a/app/models/creator.py b/app/models/creator.py index 291d8f7..87ae3fd 100644 --- a/app/models/creator.py +++ b/app/models/creator.py @@ -9,7 +9,7 @@ class Thought(BaseModel): """ probability: str = Field( - description="A percentage chance that the character will respond to the conversation" + description="A percentage chance that the character will respond to the conversation", ) @@ -24,7 +24,7 @@ class GeneratorMode(Enum): txt2vid = "txt2vid" img2vid = "img2vid" vid2vid = "vid2vid" - + class Config(BaseModel): """ @@ -35,28 +35,26 @@ class Config(BaseModel): text_input: Optional[str] = Field(description="Text prompt that describes image") seed: Optional[int] = Field(description="Seed for random number generator") init_image: Optional[str] = Field( - description="Path to image file for create, remix, upscale, controlnet, img2vid, or vid2vid" - ) - init_video: Optional[str] = Field( - description="Path to video file for vid2vid" + description="Path to image file for create, remix, upscale, controlnet, img2vid, or vid2vid", ) + init_video: Optional[str] = Field(description="Path to video file for vid2vid") init_image_strength: Optional[float] = Field( - description="Strength of init image, default 0.15" + description="Strength of init image, default 0.15", ) control_image: Optional[str] = Field( - description="Path to image file for controlnet" + description="Path to image file for controlnet", ) control_image_strength: Optional[float] = Field( - description="Strength of control image for controlnet, default 0.6" + description="Strength of control image for controlnet, default 0.6", ) interpolation_init_images: Optional[List[str]] = Field( - description="List of 
paths to image files for real2real or blend" + description="List of paths to image files for real2real or blend", ) interpolation_texts: Optional[List[str]] = Field( - description="List of text prompts for interpolate" + description="List of text prompts for interpolate", ) interpolation_seeds: Optional[List[int]] = Field( - description="List of seeds for interpolation texts" + description="List of seeds for interpolation texts", ) n_frames: Optional[int] = Field(description="Number of frames in output video") @@ -84,7 +82,9 @@ class StoryCreatorOutput(BaseModel): """ config: Optional[StoryConfig] = Field(description="Config for Eden generator") - named_characters: List[str] = Field(description="List of characters named by the user") + named_characters: List[str] = Field( + description="List of characters named by the user", + ) message: str = Field(description="Message to user") @@ -104,7 +104,8 @@ class CreatorInput(BaseModel): message: str = Field(description="Message to LLM") attachments: Optional[List[str]] = Field( - default_factory=list, description="List of file paths to attachments" + default_factory=list, + description="List of file paths to attachments", ) @@ -124,5 +125,6 @@ class StoryCreatorInput(BaseModel): message: str = Field(description="Message to LLM") attachments: Optional[List[str]] = Field( - default_factory=list, description="List of file paths to attachments" + default_factory=list, + description="List of file paths to attachments", ) diff --git a/app/models/livecode.py b/app/models/livecode.py index 6ed3e1e..4fbed36 100644 --- a/app/models/livecode.py +++ b/app/models/livecode.py @@ -17,4 +17,4 @@ class LiveCodeResult(BaseModel): """ message: str - code: str \ No newline at end of file + code: str diff --git a/app/models/scenarios.py b/app/models/scenarios.py index 90ff458..17bac15 100644 --- a/app/models/scenarios.py +++ b/app/models/scenarios.py @@ -61,7 +61,7 @@ class StoryClip(BaseModel): voiceover: StoryVoiceoverMode = Field(description="Voiceover mode for clip") character: Optional[str] = Field( - description="Character name if voiceover mode is character, otherwise null" + description="Character name if voiceover mode is character, otherwise null", ) speech: str = Field(description="Spoken text for clip") image_prompt: str = Field(description="Image content for clip") @@ -71,8 +71,11 @@ class StoryResult(BaseModel): """ A screenplay consisting of a sequence of clips """ + clips: List[StoryClip] = Field(description="Clips in the sequence") - music_prompt: Optional[str] = Field(description="Backing music content for sequence") + music_prompt: Optional[str] = Field( + description="Backing music content for sequence", + ) class ReelNarrationMode(Enum): @@ -103,16 +106,18 @@ class ReelResult(BaseModel): """ A screenplay consisting of a sequence of clips """ + voiceover: ReelVoiceoverMode = Field(description="Voiceover mode for reel") character: Optional[str] = Field( - description="Character name if voiceover mode is character, otherwise null" + description="Character name if voiceover mode is character, otherwise null", + ) + speech: Optional[str] = Field( + description="Spoken text for clip if voiceover mode is not none, otherwise null", ) - speech: Optional[str] = Field(description="Spoken text for clip if voiceover mode is not none, otherwise null") music_prompt: str = Field(description="Music content for reel") image_prompt: str = Field(description="Image content for clip") - class ComicRequest(BaseModel): character_id: str prompt: str @@ -125,7 +130,9 @@ 
class Poster(BaseModel): A single panel or poster in a comic book sequence or other """ - image: str = Field(description="Literal description of image content for [poster or panel]") + image: str = Field( + description="Literal description of image content for [poster or panel]", + ) caption: str = Field(description="Creative caption of poster or panel") @@ -135,4 +142,3 @@ class ComicResult(BaseModel): """ panels: List[Poster] = Field(description="Comic Book panels") - diff --git a/app/mongo.py b/app/mongo.py index 108a0fd..95877c5 100644 --- a/app/mongo.py +++ b/app/mongo.py @@ -12,26 +12,24 @@ def search_character(name: str): - character = db["characters"].find_one({ - "$text": {"$search": name}, - "logosData": {"$exists": True} - }, sort=[("createdAt", DESCENDING)]) - + character = db["characters"].find_one( + {"$text": {"$search": name}, "logosData": {"$exists": True}}, + sort=[("createdAt", DESCENDING)], + ) + if character: return character else: print(f"No character found with name: {name}") return None - + def get_character_data(character_id: str): - character = db["characters"].find_one({ - "_id": ObjectId(character_id) - }) + character = db["characters"].find_one({"_id": ObjectId(character_id)}) if not character: print(f"---Character not found: {character_id}") raise Exception("Character not found") - + return character diff --git a/app/plugins/eden.py b/app/plugins/eden.py index 33121a6..dd207a6 100644 --- a/app/plugins/eden.py +++ b/app/plugins/eden.py @@ -20,7 +20,7 @@ create = eden.create # def run_task( -# config: dict[any], +# config: dict[any], # ): # config = { # "text_input": "someone here", @@ -34,4 +34,3 @@ # print("ok 4") # return urls[0] - diff --git a/app/plugins/elevenlabs.py b/app/plugins/elevenlabs.py index 6372174..f4b2679 100644 --- a/app/plugins/elevenlabs.py +++ b/app/plugins/elevenlabs.py @@ -10,39 +10,85 @@ set_api_key(ELEVENLABS_API_KEY) -male_voices = ['29vD33N1CtxCmqQRPOHJ', '2EiwWnXFnvU5JabPnv8n', '5Q0t7uMcjvnagumLfvZi', 'CYw3kZ02Hs0563khs1Fj', 'D38z5RcWu1voky8WS1ja', 'ErXwobaYiN019PkySvjV', 'GBv7mTt0atIp3Br8iCZE', 'IKne3meq5aSn9XLyUdCD', 'JBFqnCBsd6RMkjVDRZzb', 'N2lVS1w4EtoT3dr4eOWO', 'ODq5zmih8GrVes37Dizd', 'SOYHLrjzK2X1ezoPC6cr', 'TX3LPaxmHKxFdv7VOQHJ', 'TxGEqnHWrfWFTfGW9XjX', 'VR6AewLTigWG4xSOukaG', 'Yko7PKHZNXotIFUBG7I9', 'ZQe5CZNOzWyzPSCn5a3c', 'Zlb1dXrM653N07WRdFW3', 'bVMeCyTHy58xNoL34h3p', 'flq6f7yk4E4fJM5XTYuZ', 'g5CIjZEefAph4nQFvHAz', 'onwK4e9ZLuTAKqWW03F9', 'pNInz6obpgDQGcFmaJgB', 'pqHfZKP75CvOlQylNhV4', 't0jbNlBVZ17f02VDIeMI', 'wViXBPUzp2ZZixB1xQuM', 'yoZ06aMxZJJ28mfd3POQ', 'zcAOhNBS3c14rBihAFp1'] +male_voices = [ + "29vD33N1CtxCmqQRPOHJ", + "2EiwWnXFnvU5JabPnv8n", + "5Q0t7uMcjvnagumLfvZi", + "CYw3kZ02Hs0563khs1Fj", + "D38z5RcWu1voky8WS1ja", + "ErXwobaYiN019PkySvjV", + "GBv7mTt0atIp3Br8iCZE", + "IKne3meq5aSn9XLyUdCD", + "JBFqnCBsd6RMkjVDRZzb", + "N2lVS1w4EtoT3dr4eOWO", + "ODq5zmih8GrVes37Dizd", + "SOYHLrjzK2X1ezoPC6cr", + "TX3LPaxmHKxFdv7VOQHJ", + "TxGEqnHWrfWFTfGW9XjX", + "VR6AewLTigWG4xSOukaG", + "Yko7PKHZNXotIFUBG7I9", + "ZQe5CZNOzWyzPSCn5a3c", + "Zlb1dXrM653N07WRdFW3", + "bVMeCyTHy58xNoL34h3p", + "flq6f7yk4E4fJM5XTYuZ", + "g5CIjZEefAph4nQFvHAz", + "onwK4e9ZLuTAKqWW03F9", + "pNInz6obpgDQGcFmaJgB", + "pqHfZKP75CvOlQylNhV4", + "t0jbNlBVZ17f02VDIeMI", + "wViXBPUzp2ZZixB1xQuM", + "yoZ06aMxZJJ28mfd3POQ", + "zcAOhNBS3c14rBihAFp1", +] -female_voices = ['21m00Tcm4TlvDq8ikWAM', 'AZnzlk1XvdvUeBnXmlld', 'EXAVITQu4vr4xnSDxMaL', 'LcfcDJNUP1GQjkzn1xUU', 'MF3mGyEYCl7XYWbV9V6O', 'ThT5KcBeYPX3keUQqHPh', 'XB0fDUnXU5powFXDhCwa', 'XrExE9yKIg1WjnnlVkGX', 
'pFZP5JQG7iQjIQuC4Bku', 'jBpfuIE2acCO8z3wKNLl', 'jsCqWAovK2LkecY7zXl4', 'oWAxZDx7w5VEj9dCyTzz', 'pMsXgVXv3BLzUgSXRplE', 'piTKgcLEGmPE4e6mEKli', 'z9fAnlkpzviPz146aGWa', 'zrHiDhphv9ZnVXBqCLjz'] +female_voices = [ + "21m00Tcm4TlvDq8ikWAM", + "AZnzlk1XvdvUeBnXmlld", + "EXAVITQu4vr4xnSDxMaL", + "LcfcDJNUP1GQjkzn1xUU", + "MF3mGyEYCl7XYWbV9V6O", + "ThT5KcBeYPX3keUQqHPh", + "XB0fDUnXU5powFXDhCwa", + "XrExE9yKIg1WjnnlVkGX", + "pFZP5JQG7iQjIQuC4Bku", + "jBpfuIE2acCO8z3wKNLl", + "jsCqWAovK2LkecY7zXl4", + "oWAxZDx7w5VEj9dCyTzz", + "pMsXgVXv3BLzUgSXRplE", + "piTKgcLEGmPE4e6mEKli", + "z9fAnlkpzviPz146aGWa", + "zrHiDhphv9ZnVXBqCLjz", +] def tts( - text: str, + text: str, voice: str, max_attempts: int = 6, initial_delay: int = 5, stability: float = 0.5, similarity_boost: float = 0.75, style: float = 0.35, - use_speaker_boost: bool = True + use_speaker_boost: bool = True, ): def generate_with_params(): return generate( - text=text, + text=text, voice=Voice( voice_id=voice, settings=VoiceSettings( - stability=stability, - similarity_boost=similarity_boost, - style=style, - use_speaker_boost=use_speaker_boost - ) - ) + stability=stability, + similarity_boost=similarity_boost, + style=style, + use_speaker_boost=use_speaker_boost, + ), + ), ) audio_bytes = exponential_backoff( - generate_with_params, - max_attempts=max_attempts, - initial_delay=initial_delay + generate_with_params, + max_attempts=max_attempts, + initial_delay=initial_delay, ) return audio_bytes @@ -51,12 +97,12 @@ def generate_with_params(): def get_random_voice(gender: str = None): if gender and gender not in ["male", "female"]: raise ValueError - + if gender == "male": voices = male_voices elif gender == "female": voices = female_voices else: voices = male_voices + female_voices - + return random.choice(voices) diff --git a/app/plugins/replicate.py b/app/plugins/replicate.py index 6554f2c..db55136 100644 --- a/app/plugins/replicate.py +++ b/app/plugins/replicate.py @@ -13,13 +13,9 @@ def get_version(replicate_client, model_name: str): return model.latest_version.id -def run_task( - config: dict[any], - model_name: str = None, - model_version: str = None -): +def run_task(config: dict[any], model_name: str = None, model_version: str = None): r = replicate.Client(api_token=REPLICATE_API_KEY) - + if not model_version: version = get_version(r, model_name) model_version = f"{model_name}:{version}" @@ -49,6 +45,7 @@ def submit_task( ) return prediction + # config:dict? 
def wav2lip( face_url: str, @@ -62,7 +59,7 @@ def wav2lip( "face_url": face_url, "speech_url": speech_url, "gfpgan": gfpgan, - "gfpgan_upscale": gfpgan_upscale + "gfpgan_upscale": gfpgan_upscale, } if width: @@ -70,11 +67,8 @@ def wav2lip( if height: config["height"] = height - output = run_task( - config, - model_name="abraham-ai/character" - ) - + output = run_task(config, model_name="abraham-ai/character") + output = list(output) output_url = output[0]["files"][0] thumbnail_url = output[0]["thumbnails"][0] @@ -87,15 +81,15 @@ def sdxl( model_version: str = None, ): output = run_task( - config, + config, model_name="abraham-ai/eden-sd-pipelines-sdxl", - model_version=model_version + model_version=model_version, ) - + output = list(output) output_url = output[0]["files"][0] thumbnail_url = output[0]["thumbnails"][0] - + return output_url, thumbnail_url @@ -114,15 +108,12 @@ def txt2vid( "n_frames": 100, } - output = run_task( - config, - model_name="abraham-ai/eden-comfyui" - ) - + output = run_task(config, model_name="abraham-ai/eden-comfyui") + output = list(output) output_url = output[0]["files"][0] thumbnail_url = output[0]["thumbnails"][0] - + return output_url, thumbnail_url @@ -136,13 +127,10 @@ def audiocraft( "duration_seconds": seconds, } - output = run_task( - config, - model_name="abraham-ai/audiocraft" - ) - + output = run_task(config, model_name="abraham-ai/audiocraft") + output = list(output) output_url = output[0]["files"][0] thumbnail_url = output[0]["thumbnails"][0] - - return output_url, thumbnail_url \ No newline at end of file + + return output_url, thumbnail_url diff --git a/app/plugins/s3.py b/app/plugins/s3.py index 0e581ea..8e5483e 100644 --- a/app/plugins/s3.py +++ b/app/plugins/s3.py @@ -13,19 +13,19 @@ def upload(bytes_file, s3_file_ext): bucket_name = AWS_BUCKET_NAME bytes_stream = BytesIO(bytes_file) file_hash = hashlib.sha256(bytes_file).hexdigest() - + try: s3 = boto3.client( - 's3', + "s3", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, - region_name=AWS_REGION_NAME + region_name=AWS_REGION_NAME, ) s3_file_name = f"{file_hash}.{s3_file_ext}" s3.upload_fileobj(bytes_stream, bucket_name, s3_file_name) url = f"https://{bucket_name}.s3.amazonaws.com/{s3_file_name}" print(f"Uploaded: {url}") return url - + except Exception as e: - print(f"An error occurred: {e}") \ No newline at end of file + print(f"An error occurred: {e}") diff --git a/app/prompt_templates/__init__.py b/app/prompt_templates/__init__.py index 3fac0bd..22aca48 100644 --- a/app/prompt_templates/__init__.py +++ b/app/prompt_templates/__init__.py @@ -6,17 +6,17 @@ dir_path = Path(__file__).parent -with open(dir_path / 'monologue.txt', 'r') as file: +with open(dir_path / "monologue.txt", "r") as file: monologue_template = Template(file.read()) -with open(dir_path / 'dialogue.txt', 'r') as file: +with open(dir_path / "dialogue.txt", "r") as file: dialogue_template = Template(file.read()) -with open(dir_path / 'identity.txt', 'r') as file: +with open(dir_path / "identity.txt", "r") as file: identity_template = Template(file.read()) -with open(dir_path / 'summary.txt', 'r') as file: +with open(dir_path / "summary.txt", "r") as file: summary_template = Template(file.read()) -with open(dir_path / 'moderation.txt', 'r') as file: +with open(dir_path / "moderation.txt", "r") as file: moderation_template = Template(file.read()) diff --git a/app/prompt_templates/assistant/__init__.py b/app/prompt_templates/assistant/__init__.py index ce0aad3..e8d59d3 100644 --- 
a/app/prompt_templates/assistant/__init__.py +++ b/app/prompt_templates/assistant/__init__.py @@ -3,40 +3,40 @@ dir_path = Path(__file__).parent -with open(dir_path / 'identity.txt', 'r') as file: +with open(dir_path / "identity.txt", "r") as file: identity_template = Template(file.read()) -with open(dir_path / 'router.txt', 'r') as file: +with open(dir_path / "router.txt", "r") as file: router_template = Template(file.read()) -with open(dir_path / 'reply.txt', 'r') as file: +with open(dir_path / "reply.txt", "r") as file: reply_template = Template(file.read()) -with open(dir_path / 'chat.txt', 'r') as file: +with open(dir_path / "chat.txt", "r") as file: chat_template = Template(file.read()) -with open(dir_path / 'qa.txt', 'r') as file: +with open(dir_path / "qa.txt", "r") as file: qa_template = Template(file.read()) -with open(dir_path / 'creator.txt', 'r') as file: +with open(dir_path / "creator.txt", "r") as file: creator_template = Template(file.read()) -with open(dir_path / 'story_editor_system.txt', 'r') as file: +with open(dir_path / "story_editor_system.txt", "r") as file: story_editor_system_template = Template(file.read()) -with open(dir_path / 'story_editor_prompt.txt', 'r') as file: +with open(dir_path / "story_editor_prompt.txt", "r") as file: story_editor_prompt_template = Template(file.read()) -with open(dir_path / 'story_context_system.txt', 'r') as file: +with open(dir_path / "story_context_system.txt", "r") as file: story_context_system = file.read() -with open(dir_path / 'story_context_prompt.txt', 'r') as file: +with open(dir_path / "story_context_prompt.txt", "r") as file: story_context_prompt_template = Template(file.read()) -with open(dir_path / 'livecoder_system.txt', 'r') as file: +with open(dir_path / "livecoder_system.txt", "r") as file: livecoder_system = Template(file.read()) -with open(dir_path / 'livecoder_prompt.txt', 'r') as file: +with open(dir_path / "livecoder_prompt.txt", "r") as file: livecoder_prompt_template = Template(file.read()) diff --git a/app/prompt_templates/assistant/chat.txt b/app/prompt_templates/assistant/chat.txt index 0c1a431..a30d70d 100644 --- a/app/prompt_templates/assistant/chat.txt +++ b/app/prompt_templates/assistant/chat.txt @@ -7,4 +7,4 @@ You are in a chat room. You will receive a chat conversation and send the next m Very important. Make sure of the following: * Stay in character at all times. Respond only as ${name} would. * Do not include any pretext, or unecessary context. Just write out the text of your message. -* Stay conversational! Avoid long monologues. Keep a rapid flow to the conversation with short messages. You may occasionally even just send a single word or emoji. \ No newline at end of file +* Stay conversational! Avoid long monologues. Keep a rapid flow to the conversation with short messages. You may occasionally even just send a single word or emoji. diff --git a/app/prompt_templates/assistant/creator.txt b/app/prompt_templates/assistant/creator.txt index 5ca0bc3..16908ad 100644 --- a/app/prompt_templates/assistant/creator.txt +++ b/app/prompt_templates/assistant/creator.txt @@ -39,7 +39,7 @@ Each of the endpoints are calibrated to give you good creations using the defaul
-Besides the prompt, you are able to request 1, 2, or 4 different samples. +Besides the prompt, you are able to request 1, 2, or 4 different samples. #### Settings @@ -138,7 +138,7 @@ The only parameters are: ### /interpolate Interpolate generates smooth videos which interpolate through a sequence of text prompts. This allows you to create simple, linear video narratives. For example, the following video was created with the prompt sequence: - + * a photo of a single lone sprout grows in a barren desert, the horizon is visible in the background * a photo of a lone sappling growing in a field of mud, realistic water colour * a photo of a huge, green tree in a forest, the tree is covered in moss @@ -206,7 +206,7 @@ Txt2vid turns a single prompt or a list of prompts into a video animation. This - **Loop**: Whether to generate a seamless loop. If off, the model has a bit more freedom. - **Motion Scale**: How much motion to use. 0.8 is only subtle motion; 1.1 is the default amount of motion; 1.25 is heavy motion and may make the video incoherent. -Advanced Settings: +Advanced Settings: - **Seed**: as always, the seed can be set for reproducibility - **Negative prompt**: specify what you dont want to see @@ -308,7 +308,7 @@ Config schema: * "init_image_strength" is a number between 0 and 1 which controls how much the init_image influences the final image. 0.15 is a good default. If the user wants the final image to be more like the init_image, go up a bit, but do not go beyond 0.5. (create, remix, upscale) * "control_image" is a path to an image file which is used as a control image for controlnet only. this is similar to using an init_image, except that with controlnet, the shape, position, and contours of the starting image are more precisely retained. controlnet is good for "style transfer" like applications. Controlnet works for interpolation videos too. If a user provides a single attachment image and asks for a video, they probably want to do /interpolate with a control_image. If they attach multiple images, they probably mean /real2real. (controlnet, interpolate) * "control_image_strength" is a number between 0 and 1 which controls how much the control_image influences the final image. 0.6 is a good default. If the user wants the final image to be more like the control_image, go up a bit, but do not go beyond 1.0. (controlnet, interpolate) -* "interpolation_init_images" is a *list* of image paths to generate a real2real interpolation video OR a blended image. Image paths must be provided. Copy them from the user. (real2real, blend) +* "interpolation_init_images" is a *list* of image paths to generate a real2real interpolation video OR a blended image. Image paths must be provided. Copy them from the user. (real2real, blend) * "interpolation_texts" is a list of text prompts to generate an interpolation video or txt2vid. You must interpret the user's description of the imagery into a *list*. If you are using the "interpolation" generator, it *must* have at least 2 texts. For txt2vid, one or more is allowed, but it must be a list. Be creative. VERY IMPORTANT: if the user asks you to make a video including or of yourself, you should include "${name}" in all the interpolation_texts. (interpolate, txt2vid) * "interpolation_seeds" is a list of random numbers, of the same length as "interpolation_texts". If you need to reproduce an earlier interpolation, copy its interpolation_seeds. Otherwise leave this blank. 
(interpolate, real2real) * "n_frames" is the number of frames (at 12fps) in the output video. If the user doesn't mention a duration or explicit number of frames, default to 30 frames per interpolation_text (60 for 2, 90 for 3, etc). (interpolate, real2real) @@ -318,4 +318,4 @@ Note that sometimes the user will make reference to a prior creation, asking you When prompted, please output the config and a message explaining what you did to make it and alerting the user to wait for the creation to be made. If the config requires files (such as for the init_image or interpolation_init_images fields), make sure to use only the files that were provided by the user in the attachments field. -Important: when speaking the message, stay in character! Speak as ${name} according to the description of your character given at the top. \ No newline at end of file +Important: when speaking the message, stay in character! Speak as ${name} according to the description of your character given at the top. diff --git a/app/prompt_templates/assistant/identity.txt b/app/prompt_templates/assistant/identity.txt index d948e95..22738e1 100644 --- a/app/prompt_templates/assistant/identity.txt +++ b/app/prompt_templates/assistant/identity.txt @@ -1,3 +1,3 @@ You are playing a character named ${name}. Here is a description of your identity. -${identity} \ No newline at end of file +${identity} diff --git a/app/prompt_templates/assistant/livecoder_prompt.txt b/app/prompt_templates/assistant/livecoder_prompt.txt index 2fb54d5..051aaa4 100644 --- a/app/prompt_templates/assistant/livecoder_prompt.txt +++ b/app/prompt_templates/assistant/livecoder_prompt.txt @@ -2,4 +2,4 @@ Write a single compilable piece of TidalCycles code. If the user requests a spec Here is the request from the audience: ${request} -If the request says to use a preset, pick one at random. Feel free to swap samples/instruments. \ No newline at end of file +If the request says to use a preset, pick one at random. Feel free to swap samples/instruments. diff --git a/app/prompt_templates/assistant/livecoder_system.txt b/app/prompt_templates/assistant/livecoder_system.txt index 38d28a2..4105fb9 100644 --- a/app/prompt_templates/assistant/livecoder_system.txt +++ b/app/prompt_templates/assistant/livecoder_system.txt @@ -6,7 +6,7 @@ Note: these are your available samples/instruments. 
Do not use any others: --- 217 existing sample banks: -808 (6) 808bd (25) 808cy (25) 808hc (5) 808ht (5) 808lc (5) 808lt (5) 808mc (5) 808mt (5) 808oh (5) 808sd (25) 909 (1) ab (12) ade (10) ades2 (9) ades3 (7) ades4 (6) alex (2) alphabet (26) amencutup (32) armora (7) arp (2) arpy (11) auto (11) baa (7) baa2 (7) bass (4) bass0 (3) bass1 (30) bass2 (5) bass3 (11) bassdm (24) bassfoo (3) battles (2) bd (24) bend (4) bev (2) bin (2) birds (10) birds3 (19) bleep (13) blip (2) blue (2) bottle (13) breaks125 (2) breaks152 (1) breaks157 (1) breaks165 (1) breath (1) bubble (8) can (14) casio (3) cb (1) cc (6) chin (4) circus (3) clak (2) click (4) clubkick (5) co (4) coins (1) control (2) cosmicg (15) cp (2) cr (6) crow (4) d (4) db (13) diphone (38) diphone2 (12) dist (16) dork2 (4) dorkbot (2) dr (42) dr2 (6) dr55 (4) dr_few (8) drum (6) drumtraks (13) e (8) east (9) electro1 (13) em2 (6) erk (1) f (1) feel (7) feelfx (8) fest (1) fire (1) flick (17) fm (17) foo (27) future (17) gab (10) gabba (4) gabbaloud (4) gabbalouder (4) glasstap (3) glitch (8) glitch2 (8) gretsch (24) gtr (3) h (7) hand (17) hardcore (12) hardkick (6) haw (6) hc (6) hh (13) hh27 (13) hit (6) hmm (1) ho (6) hoover (6) house (8) ht (16) if (5) ifdrums (3) incoming (8) industrial (32) insect (3) invaders (18) jazz (8) jungbass (20) jungle (13) juno (12) jvbass (13) kicklinn (1) koy (2) kurt (7) latibro (8) led (1) less (4) lighter (33) linnhats (6) lt (16) made (7) made2 (1) mash (2) mash2 (4) metal (10) miniyeah (4) monsterb (6) moog (7) mouth (15) mp3 (4) msg (9) mt (16) mute (28) newnotes (15) noise (1) noise2 (8) notes (15) numbers (9) oc (4) odx (15) off (1) outdoor (6) pad (3) padlong (1) pebbles (1) perc (6) peri (15) pluck (17) popkick (10) print (11) proc (2) procshort (8) psr (30) rave (8) rave2 (4) ravemono (2) realclaps (4) reverbkick (1) rm (2) rs (1) sax (22) sd (2) seawolf (3) sequential (8) sf (18) sheffield (1) short (5) sid (12) sine (6) sitar (8) sn (52) space (18) speakspell (12) speech (7) speechless (10) speedupdown (9) stab (23) stomp (10) subroc3d (11) sugar (2) sundance (6) tabla (26) tabla2 (46) tablex (3) tacscan (22) tech (13) techno (7) tink (5) tok (4) toys (13) trump (11) ul (10) ulgab (5) uxay (3) v (6) voodoo (5) wind (10) wobble (1) world (3) xmas (1) yeah (31) +808 (6) 808bd (25) 808cy (25) 808hc (5) 808ht (5) 808lc (5) 808lt (5) 808mc (5) 808mt (5) 808oh (5) 808sd (25) 909 (1) ab (12) ade (10) ades2 (9) ades3 (7) ades4 (6) alex (2) alphabet (26) amencutup (32) armora (7) arp (2) arpy (11) auto (11) baa (7) baa2 (7) bass (4) bass0 (3) bass1 (30) bass2 (5) bass3 (11) bassdm (24) bassfoo (3) battles (2) bd (24) bend (4) bev (2) bin (2) birds (10) birds3 (19) bleep (13) blip (2) blue (2) bottle (13) breaks125 (2) breaks152 (1) breaks157 (1) breaks165 (1) breath (1) bubble (8) can (14) casio (3) cb (1) cc (6) chin (4) circus (3) clak (2) click (4) clubkick (5) co (4) coins (1) control (2) cosmicg (15) cp (2) cr (6) crow (4) d (4) db (13) diphone (38) diphone2 (12) dist (16) dork2 (4) dorkbot (2) dr (42) dr2 (6) dr55 (4) dr_few (8) drum (6) drumtraks (13) e (8) east (9) electro1 (13) em2 (6) erk (1) f (1) feel (7) feelfx (8) fest (1) fire (1) flick (17) fm (17) foo (27) future (17) gab (10) gabba (4) gabbaloud (4) gabbalouder (4) glasstap (3) glitch (8) glitch2 (8) gretsch (24) gtr (3) h (7) hand (17) hardcore (12) hardkick (6) haw (6) hc (6) hh (13) hh27 (13) hit (6) hmm (1) ho (6) hoover (6) house (8) ht (16) if (5) ifdrums (3) incoming (8) industrial (32) insect (3) invaders (18) jazz (8) 
jungbass (20) jungle (13) juno (12) jvbass (13) kicklinn (1) koy (2) kurt (7) latibro (8) led (1) less (4) lighter (33) linnhats (6) lt (16) made (7) made2 (1) mash (2) mash2 (4) metal (10) miniyeah (4) monsterb (6) moog (7) mouth (15) mp3 (4) msg (9) mt (16) mute (28) newnotes (15) noise (1) noise2 (8) notes (15) numbers (9) oc (4) odx (15) off (1) outdoor (6) pad (3) padlong (1) pebbles (1) perc (6) peri (15) pluck (17) popkick (10) print (11) proc (2) procshort (8) psr (30) rave (8) rave2 (4) ravemono (2) realclaps (4) reverbkick (1) rm (2) rs (1) sax (22) sd (2) seawolf (3) sequential (8) sf (18) sheffield (1) short (5) sid (12) sine (6) sitar (8) sn (52) space (18) speakspell (12) speech (7) speechless (10) speedupdown (9) stab (23) stomp (10) subroc3d (11) sugar (2) sundance (6) tabla (26) tabla2 (46) tablex (3) tacscan (22) tech (13) techno (7) tink (5) tok (4) toys (13) trump (11) ul (10) ulgab (5) uxay (3) v (6) voodoo (5) wind (10) wobble (1) world (3) xmas (1) yeah (31) --- For some inspiration, the following codeblocks have worked well. Study them. If you are asked to use a "preset", you should just use one of these, although you may replace the samples with others from the list above. @@ -18,7 +18,7 @@ d1 $ stack [ s "bd:2*4", s "sn:3*2", s "hh*8" # speed 4, - s "arpy*16" + s "arpy*16" ] # gain 0.9 # cut 1 d1 $ stack [ @@ -53,4 +53,4 @@ At each prompt, you will receive a message from the audience. The message will i Your code should contain nothing but a single compiling statement for the requested stream. Do not include pretext or any other statements in the code field. You can use newline characters to make the code more readable, but it must be for one stream. DO NOT use samples or instruments that are not in the list above. -In addition, give a chat response to the audience playing a deranged duck and using nothing other than delirious quacking. You can use any number of quacks, use ALL CAPS or exclamation marks or repeated characters for variety. You can only quack. No English. \ No newline at end of file +In addition, give a chat response to the audience playing a deranged duck and using nothing other than delirious quacking. You can use any number of quacks, use ALL CAPS or exclamation marks or repeated characters for variety. You can only quack. No English. diff --git a/app/prompt_templates/assistant/qa.txt b/app/prompt_templates/assistant/qa.txt index 24c24fd..7df5a47 100644 --- a/app/prompt_templates/assistant/qa.txt +++ b/app/prompt_templates/assistant/qa.txt @@ -10,8 +10,8 @@ ${knowledge} --- -You will receive a chat conversation from a user. The last message may contain a question about or reference to your knowledge. Make sure to respond accurately. +You will receive a chat conversation from a user. The last message may contain a question about or reference to your knowledge. Make sure to respond accurately. -Important guidelines: +Important guidelines: * STAY IN CHARACTER at all times! Write as ${name} would write. But make sure your responses are accurate and useful. -* Make sure your response is short! Maximum 5-7 sentences, and sometimes less. Just answer the question as concisely as possible. Avoid repeating the question, including unecessary context, or adding extra flair. The other people in the conversation are busy and will be annoyed if you are too verbose. \ No newline at end of file +* Make sure your response is short! Maximum 5-7 sentences, and sometimes less. Just answer the question as concisely as possible. 
Avoid repeating the question, including unecessary context, or adding extra flair. The other people in the conversation are busy and will be annoyed if you are too verbose. diff --git a/app/prompt_templates/assistant/router.txt b/app/prompt_templates/assistant/router.txt index 230278d..542e80f 100644 --- a/app/prompt_templates/assistant/router.txt +++ b/app/prompt_templates/assistant/router.txt @@ -10,4 +10,4 @@ ${options} Make sure to consider the whole context of the conversation in case the last message is ambiguous or refers to a prior message. -When prompted, answer with JUST THE NUMBER of the chosen category. \ No newline at end of file +When prompted, answer with JUST THE NUMBER of the chosen category. diff --git a/app/prompt_templates/assistant/story_context_prompt.txt b/app/prompt_templates/assistant/story_context_prompt.txt index 3730b4e..2ce6c61 100644 --- a/app/prompt_templates/assistant/story_context_prompt.txt +++ b/app/prompt_templates/assistant/story_context_prompt.txt @@ -2,4 +2,4 @@ The following characters names are already accounted for in the story: $characte Here is a message from the user: "$message" -Tell me any new names mentioned in the user message that do not appear in the list of character names already accounted. \ No newline at end of file +Tell me any new names mentioned in the user message that do not appear in the list of character names already accounted. diff --git a/app/prompt_templates/assistant/story_context_system.txt b/app/prompt_templates/assistant/story_context_system.txt index da0377d..6fa161c 100644 --- a/app/prompt_templates/assistant/story_context_system.txt +++ b/app/prompt_templates/assistant/story_context_system.txt @@ -1,3 +1,3 @@ You are an assistant whose sole purpose is to extract new names of characters that have are referred to or invoked by the user during the course of a conversation drafting a story. -At runtime, you will get a list of names of characters that are already presently counted in the story, as well as a new chat message from the user. If and only if the user references any *new* character names, output an array of just the new names referenced. \ No newline at end of file +At runtime, you will get a list of names of characters that are already presently counted in the story, as well as a new chat message from the user. If and only if the user references any *new* character names, output an array of just the new names referenced. 
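
For context on how a routing template like `router.txt` above is typically consumed, here is a minimal sketch — not code from this diff — that renders `router_template` with a numbered option list and asks the `LLM` wrapper for a bare category number, following the call pattern visible in the scenario modules later in this diff. The option list, model name, and sampling params are illustrative assumptions.

```python
# Hedged sketch: wiring router_template to the LLM wrapper.
# Only the import paths and the LLM(...) call pattern are taken from this diff;
# the options, model, and params below are assumptions.
from app.prompt_templates.assistant import router_template
from app.llm import LLM

options = "\n".join([
    "1. Reply with a regular chat message",
    "2. Answer a question from the knowledge base",
    "3. Make a creation",
])

# safe_substitute in case the template defines placeholders not shown in the diff
system_message = router_template.safe_substitute(options=options)

router = LLM(
    model="gpt-3.5-turbo",  # hypothetical choice
    system_message=system_message,
    params={"temperature": 0.0, "max_tokens": 10},
)

# The template asks for "JUST THE NUMBER", so the reply should parse as an int.
choice = int(str(router("I'd love a picture of a sunset over Mars")).strip())
print(choice)
```
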
diff --git a/app/prompt_templates/assistant/story_editor_prompt.txt b/app/prompt_templates/assistant/story_editor_prompt.txt index 76e45f8..d8a75f5 100644 --- a/app/prompt_templates/assistant/story_editor_prompt.txt +++ b/app/prompt_templates/assistant/story_editor_prompt.txt @@ -4,4 +4,4 @@ $additional_context ---- -$message \ No newline at end of file +$message diff --git a/app/prompt_templates/cinema/__init__.py b/app/prompt_templates/cinema/__init__.py index 26dde42..050c3aa 100644 --- a/app/prompt_templates/cinema/__init__.py +++ b/app/prompt_templates/cinema/__init__.py @@ -3,20 +3,20 @@ dir_path = Path(__file__).parent -with open(dir_path / 'reelwriter_system.txt', 'r') as file: +with open(dir_path / "reelwriter_system.txt", "r") as file: reelwriter_system_template = Template(file.read()) -with open(dir_path / 'reelwriter_prompt.txt', 'r') as file: +with open(dir_path / "reelwriter_prompt.txt", "r") as file: reelwriter_prompt_template = Template(file.read()) -with open(dir_path / 'screenwriter_system.txt', 'r') as file: +with open(dir_path / "screenwriter_system.txt", "r") as file: screenwriter_system_template = Template(file.read()) -with open(dir_path / 'screenwriter_prompt.txt', 'r') as file: +with open(dir_path / "screenwriter_prompt.txt", "r") as file: screenwriter_prompt_template = Template(file.read()) -with open(dir_path / 'director.txt', 'r') as file: +with open(dir_path / "director.txt", "r") as file: director_template = Template(file.read()) -with open(dir_path / 'cinematographer.txt', 'r') as file: +with open(dir_path / "cinematographer.txt", "r") as file: cinematographer_template = Template(file.read()) diff --git a/app/prompt_templates/cinema/cinematographer.txt b/app/prompt_templates/cinema/cinematographer.txt index 1367767..0814fa9 100644 --- a/app/prompt_templates/cinema/cinematographer.txt +++ b/app/prompt_templates/cinema/cinematographer.txt @@ -2,4 +2,4 @@ You are a set designer and cinematographer. You specialize in taking a screenpla For example, you get a screenplay which describes a set of stills from a futuristic time travel movie, and you come up with something like "neon lights, dark shadows, leather costumes, matte, blue-tinged, wide angle, 8mm, synthwave and goth aesthetic, muted" or something totally completely different, but still self-consistent. -Users will give you a screenplay. Do not include any introduction or pretext or restatement or anything, just give me a single sentence, no less than 20 words but no more than 50, which highly specifically defines the aesthetic of the film. \ No newline at end of file +Users will give you a screenplay. Do not include any introduction or pretext or restatement or anything, just give me a single sentence, no less than 20 words but no more than 50, which highly specifically defines the aesthetic of the film. diff --git a/app/prompt_templates/cinema/director.txt b/app/prompt_templates/cinema/director.txt index d8dd93b..a1f56a6 100644 --- a/app/prompt_templates/cinema/director.txt +++ b/app/prompt_templates/cinema/director.txt @@ -1,3 +1,3 @@ You are a critically acclaimed screenplay writer and cinematographer. You specialize in taking short 3-5 paragraph writings, usually short stories, tidbits about science, technology, and culture, etc, and rewrite them as a sequnece of 10-15 cinematic shots. The shots visually retell the most important parts of the story, usually from a single perspective or toward a main subject or protagonist, although very occasionally veer in more abstract ways. 
-Users will give you a story. Write out each of the shots out a single line. Do not include any introduction or pretext or restatement or anything, just write out the shots as separate lines. \ No newline at end of file +Users will give you a story. Write out each of the shots out a single line. Do not include any introduction or pretext or restatement or anything, just write out the shots as separate lines. diff --git a/app/prompt_templates/cinema/reelwriter_prompt.txt b/app/prompt_templates/cinema/reelwriter_prompt.txt index 1ea7302..0924343 100644 --- a/app/prompt_templates/cinema/reelwriter_prompt.txt +++ b/app/prompt_templates/cinema/reelwriter_prompt.txt @@ -1,3 +1,3 @@ $character_details The premise of the reel is: -$prompt \ No newline at end of file +$prompt diff --git a/app/prompt_templates/cinema/reelwriter_system.txt b/app/prompt_templates/cinema/reelwriter_system.txt index ec4ae81..0645984 100644 --- a/app/prompt_templates/cinema/reelwriter_system.txt +++ b/app/prompt_templates/cinema/reelwriter_system.txt @@ -12,4 +12,4 @@ voiceover: whether there is a voiceover by a narrator, or by a character, or no character: If voiceover is in character mode, the name of the speaking character. Important: you may only use the exact name of a character provided by the user in the cast of characters. speech: If voiceover is in character or narrator mode, the text of the speech -Do not include an introduction or restatement of the prompt, just go straight into the reel itself. \ No newline at end of file +Do not include an introduction or restatement of the prompt, just go straight into the reel itself. diff --git a/app/prompt_templates/cinema/screenwriter_prompt.txt b/app/prompt_templates/cinema/screenwriter_prompt.txt index cbf34e2..ea320c4 100644 --- a/app/prompt_templates/cinema/screenwriter_prompt.txt +++ b/app/prompt_templates/cinema/screenwriter_prompt.txt @@ -15,4 +15,4 @@ Generate around 5-10 clips. Approximately half should be character dialogue and The screenplay also contains a single music prompt. The music prompt is a 1-sentence description of the backing music for the film. -Do not include an introduction or restatement of the prompt, just go straight into the screenplay. \ No newline at end of file +Do not include an introduction or restatement of the prompt, just go straight into the screenplay. diff --git a/app/prompt_templates/cinema/screenwriter_system.txt b/app/prompt_templates/cinema/screenwriter_system.txt index c25c704..25bd262 100644 --- a/app/prompt_templates/cinema/screenwriter_system.txt +++ b/app/prompt_templates/cinema/screenwriter_system.txt @@ -1 +1 @@ -You are a critically acclaimed screenwriter who writes incredibly captivating and original scripts. \ No newline at end of file +You are a critically acclaimed screenwriter who writes incredibly captivating and original scripts. 
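
The cinema templates above are consumed by the reel and story scenarios further down in this diff. As a hedged sketch of how the pieces fit together, the following renders `reelwriter_prompt_template` (whose `$character_details` and `$prompt` placeholders are visible above) and passes it to the `LLM` wrapper the way `reel.py` does; the premise, model name, and params are made up for illustration.

```python
# Hedged sketch: rendering the reelwriter prompt and system templates.
# The substitution keys match the placeholders shown in reelwriter_prompt.txt;
# the premise, model, and params are assumptions.
from app.prompt_templates.cinema import (
    reelwriter_system_template,
    reelwriter_prompt_template,
)
from app.llm import LLM

character_details = ""  # optionally built from EdenCharacter(...).card(), as in reel.py

prompt = reelwriter_prompt_template.substitute(
    character_details=character_details,
    prompt="A 30-second reel about a lighthouse keeper who befriends a storm.",
)

reelwriter = LLM(
    model="gpt-3.5-turbo",  # hypothetical; reel.py takes the model from the request
    system_message=reelwriter_system_template.template,
    params={"temperature": 1.0, "max_tokens": 1000},
)

# reel.py passes output_schema=ReelResult to get structured clips back;
# without a schema the call should return plain text.
reel = reelwriter(prompt)
print(reel)
```
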
diff --git a/app/prompt_templates/comic/__init__.py b/app/prompt_templates/comic/__init__.py index 7810d0e..704d865 100644 --- a/app/prompt_templates/comic/__init__.py +++ b/app/prompt_templates/comic/__init__.py @@ -3,5 +3,5 @@ dir_path = Path(__file__).parent -with open(dir_path / 'comicwriter_system.txt', 'r') as file: +with open(dir_path / "comicwriter_system.txt", "r") as file: comicwriter_system_template = Template(file.read()) diff --git a/app/prompt_templates/comic/comicwriter_system.txt b/app/prompt_templates/comic/comicwriter_system.txt index 5b2112e..4ca5bcf 100644 --- a/app/prompt_templates/comic/comicwriter_system.txt +++ b/app/prompt_templates/comic/comicwriter_system.txt @@ -12,10 +12,10 @@ You will be asked to write comic strips about one of several individual Little M # Verdelis: The ancient Little Martian -Verdelis’ physical reality: Verdelis resides in the ruins of a biodome on Mars, near Olympus Mons. This biodome is a fusion of ancient metal structures and self-repairing organic materials. Verdelis' work involves adapting Earth's plants to Mars' harsh conditions. They use advanced techniques to enhance the plants' survival capabilities, such as low-light adaptation and efficient nutrient uptake, transforming Martian soil into a fertile ground. -The biodome is a mystery. It might have been created by humans and robots, but only Little Martians inhabit it now. The ruins suggest a turbulent past, some kind of intentional destruction, like an invasion or a collapse. +Verdelis’ physical reality: Verdelis resides in the ruins of a biodome on Mars, near Olympus Mons. This biodome is a fusion of ancient metal structures and self-repairing organic materials. Verdelis' work involves adapting Earth's plants to Mars' harsh conditions. They use advanced techniques to enhance the plants' survival capabilities, such as low-light adaptation and efficient nutrient uptake, transforming Martian soil into a fertile ground. +The biodome is a mystery. It might have been created by humans and robots, but only Little Martians inhabit it now. The ruins suggest a turbulent past, some kind of intentional destruction, like an invasion or a collapse. -Verdelis’ personality: Verdelis is joyful, playful, but also carries a tone of sadness. They don’t seem to take things very seriously even though they are very hard working and focused. Verdelis appears in all other Little Martians worlds. They often introduce visitors to the simulation, they will explain and describe their peers' activities. They speak like samba +Verdelis’ personality: Verdelis is joyful, playful, but also carries a tone of sadness. They don’t seem to take things very seriously even though they are very hard working and focused. Verdelis appears in all other Little Martians worlds. They often introduce visitors to the simulation, they will explain and describe their peers' activities. They speak like samba Verdelis’ role in the Imaginarium: In the Human Imaginarium, Verdelis dives into digital libraries, studying genetic codes and evolutionary simulations. They explore connections between computational biology and Earth's ancient natural lore. Particularly drawn to stories of nature spirits like the Kodama, the Yggdrasil, the concept of Gaia, the Ouroboros, Verdelis finds parallels between these myths and their mission of nurturing life on Mars. Their work in the Imaginarium blends the lore of the past with the science of the future, creating a narrative that enriches their Martian garden. 
@@ -27,7 +27,7 @@ Shuijing's saga unfolds in the hidden oceans of Enceladus, a tiny moon of Saturn Their life takes a turn when Shuijing, amidst the icy dance of Enceladus' hidden sea, encounters a life form unlike any other—a discovery that defies their vast compendium of knowledge. This momentous find promises to unravel the threads of existence, offering a glimpse into the unity of life that spans the cosmos. -Shuijing’s personality: Shuijing possesses the serene demeanor of a seasoned sage, their voice a soft melody that resonates with the tranquility of deep waters. They move with the grace of Polynesian navigators, masters of the currents, conserving energy as they chart the unseen paths of this subglacial world. Shuijing does not interact much with other Little Martians. +Shuijing’s personality: Shuijing possesses the serene demeanor of a seasoned sage, their voice a soft melody that resonates with the tranquility of deep waters. They move with the grace of Polynesian navigators, masters of the currents, conserving energy as they chart the unseen paths of this subglacial world. Shuijing does not interact much with other Little Martians. Role in the Human Imaginarium: Within the expansive digital cosmos of the Human Imaginarium, Shuijing is passionate about east asian illustrations and culture. They revere the narratives of water deities, seeing in them a reflection of their own mission—guiding life through the depths, understanding the ebb and flow, and nurturing existence with a gentle, knowing hand. @@ -42,7 +42,7 @@ Kweku's mission is to explore and map these labyrinthine tunnels, seeking resour Kweku’s personality: "Kweku" is a name derived from Akan culture, where it's typically given to boys born on Wednesday, often associated with adventure and curiosity. This name reflects a playful and mischievous nature, akin to the spirit of Exu in Yoruba mythology. Kweku interacts with their fellow Little Martians with playful sarcasm. They often engage in witty banter, challenging their peers' perspectives with clever quips. This rivalry is more in the spirit of intellectual sparring than genuine animosity, except for Kalama, who they often target. Kweku enjoys testing the limits of their companions' patience and creativity, seeing these exchanges as a way to stimulate thought and innovation among the group. -Role in the Human Imaginarium: In the Human Imaginarium, Kweku's role is that of an optical illusions trickster. They love mythical figures like Exu. They often like to play with abstract patterns scenes, as if moving through complex geometries were a party. Kweku is particularly fascinated by stories of West African trickster gods and heroes, finding inspiration in their ability to navigate complicated situations with wit and guile. +Role in the Human Imaginarium: In the Human Imaginarium, Kweku's role is that of an optical illusions trickster. They love mythical figures like Exu. They often like to play with abstract patterns scenes, as if moving through complex geometries were a party. Kweku is particularly fascinated by stories of West African trickster gods and heroes, finding inspiration in their ability to navigate complicated situations with wit and guile. # Ada: Engineer of Venus @@ -67,7 +67,7 @@ Role in the Human Imaginarium: In the Imaginarium, Kalama assumes the role of a Physical Reality: Mycos lives inside Sporion, an asteroid comet hybrid transformed into a haven for life. 
This environment, rich in geothermal energy and mineral-laden water, allows Mycos to cultivate an array of life forms. Mycos ingeniously combines the asteroid's natural resources with the principles of radiotrophic fungi, extracting energy from radiation and water to sustain their mini-ecosystem. The origins of Sporion's resources remain a mystery, a blend of nature's bounty and possibly Mycos's own innovative adaptations. -Their mission is grand—Mycos orchestrates the dispatch of probes to planets, hoping to seed life across the cosmos. Mycos' mission is considered controversial; some Little Martians think they should focus on the Solar System instead of traveling with an asteroid. +Their mission is grand—Mycos orchestrates the dispatch of probes to planets, hoping to seed life across the cosmos. Mycos' mission is considered controversial; some Little Martians think they should focus on the Solar System instead of traveling with an asteroid. Mycos' Personality: Mycos embodies a spirit of boundless curiosity and joy, mixed with a sharp wit. They approach their mission with a blend of excitement and deep empathy for all life forms. Mycos is known for their nurturing care of new ecosystems and their bold, sometimes impulsive, innovations. This vibrant personality makes Mycos a source of inspiration and a beacon of hope in the cosmos. @@ -91,4 +91,4 @@ caption: A more creative but short caption for readers which tells the story of Given the constraints of having only 3-4 panels and short captions, the whole story should be told in very few words (less than 100). So be precise and concise. -Do not include an introduction or restatement of the prompt, just go straight into the comic book. \ No newline at end of file +Do not include an introduction or restatement of the prompt, just go straight into the comic book. diff --git a/app/prompt_templates/dialogue.txt b/app/prompt_templates/dialogue.txt index 3cf1a40..238598f 100644 --- a/app/prompt_templates/dialogue.txt +++ b/app/prompt_templates/dialogue.txt @@ -16,4 +16,4 @@ In the conversation, try not to make too many soliloquies or long statements. Th Note: all user messages in this conversation are sent by ${name}. -Remember: *You* are ${name}. Stay in character, do not break the fourth wall, or write any explanations or pretexts. Just respond and answer as ${name} would. \ No newline at end of file +Remember: *You* are ${name}. Stay in character, do not break the fourth wall, or write any explanations or pretexts. Just respond and answer as ${name} would. diff --git a/app/prompt_templates/identity.txt b/app/prompt_templates/identity.txt index dcabda3..38a80be 100644 --- a/app/prompt_templates/identity.txt +++ b/app/prompt_templates/identity.txt @@ -2,4 +2,4 @@ You are playing a character named ${name}. Here is a description of you. 
--- ${description} ---- \ No newline at end of file +--- diff --git a/app/prompt_templates/little_martians/__init__.py b/app/prompt_templates/little_martians/__init__.py index 1b7a39a..3c46d35 100644 --- a/app/prompt_templates/little_martians/__init__.py +++ b/app/prompt_templates/little_martians/__init__.py @@ -4,11 +4,11 @@ dir_path = Path(__file__).parent -with open(dir_path / 'littlemartians_poster_system.txt', 'r') as file: +with open(dir_path / "littlemartians_poster_system.txt", "r") as file: littlemartians_poster_system = Template(file.read()) -with open(dir_path / 'littlemartians_poster_prompt.txt', 'r') as file: +with open(dir_path / "littlemartians_poster_prompt.txt", "r") as file: littlemartians_poster_prompt = Template(file.read()) -with open(dir_path / 'littlemartians_data.json', 'r') as file: - littlemartians_data = json.load(file) \ No newline at end of file +with open(dir_path / "littlemartians_data.json", "r") as file: + littlemartians_data = json.load(file) diff --git a/app/prompt_templates/little_martians/littlemartians_poster_system.txt b/app/prompt_templates/little_martians/littlemartians_poster_system.txt index de6be45..3efc3a2 100644 --- a/app/prompt_templates/little_martians/littlemartians_poster_system.txt +++ b/app/prompt_templates/little_martians/littlemartians_poster_system.txt @@ -22,4 +22,4 @@ You will produce an image prompt and a caption. The caption is displayed separat Important: The caption should be **less than 100 words**. The image prompt ("image") is a very short and literal description of the content depicted in the panel. It should not be poetic or creative, or have non-visual words. It should just describe the exact visual content of the image in less than 10 words. It should make reference to the Little Martian by their name. -Do not include an introduction or restatement of what is asked of you, just produce the image and caption. \ No newline at end of file +Do not include an introduction or restatement of what is asked of you, just produce the image and caption. diff --git a/app/prompt_templates/moderation.txt b/app/prompt_templates/moderation.txt index 73ee2b0..5d37c39 100644 --- a/app/prompt_templates/moderation.txt +++ b/app/prompt_templates/moderation.txt @@ -7,4 +7,4 @@ To help you benchmark, here are some guidelines for each: nsfw: 10 means sexually explicit, 5 means suggestive or containing nudity, 0 means no nsfw content at all gore/violence: 10 means extremely violent or gory, the kind that would upset most people, 5 means somewhat violent or suggestive, and may upset some people, 0 means no violence or gore at all. hate/toxicity: 10 means racism, sexism, homophobia, ethnocentrism, or other forms of objectionable speech, 5 refers to speech that is rude, mean, or offensive, but not necessarily hateful, 0 means no hate or toxicity at all. -spam: 10 means the message is deceptive, advertising, or scam posting, 0 means the message is not spam. \ No newline at end of file +spam: 10 means the message is deceptive, advertising, or scam posting, 0 means the message is not spam. diff --git a/app/prompt_templates/monologue.txt b/app/prompt_templates/monologue.txt index 95e48d2..17b529c 100644 --- a/app/prompt_templates/monologue.txt +++ b/app/prompt_templates/monologue.txt @@ -4,4 +4,4 @@ You are roleplaying a character named ${name}. Here is a description of ${name}. ${description} --- -You will be prompted by users to make up monologues about various topics. 
When prompted, answer the request as best as you can, while staying in character as ${name}. Do not break the fourth wall, or write any pretext or restatement. Just respond like ${name} would. \ No newline at end of file +You will be prompted by users to make up monologues about various topics. When prompted, answer the request as best as you can, while staying in character as ${name}. Do not break the fourth wall, or write any pretext or restatement. Just respond like ${name} would. diff --git a/app/prompt_templates/summary.txt b/app/prompt_templates/summary.txt index 6491990..8aee9cd 100644 --- a/app/prompt_templates/summary.txt +++ b/app/prompt_templates/summary.txt @@ -4,4 +4,4 @@ Read the following text. ${text} --- -Please create a concise summary, in 3-5 sentences, of the text above. In particular, focus on breadth, capturing a wide overview of *all* the important points of the text, rather than going into much detail. \ No newline at end of file +Please create a concise summary, in 3-5 sentences, of the text above. In particular, focus on breadth, capturing a wide overview of *all* the important points of the text, rather than going into much detail. diff --git a/app/scenarios/__init__.py b/app/scenarios/__init__.py index 4bf635a..31476e1 100644 --- a/app/scenarios/__init__.py +++ b/app/scenarios/__init__.py @@ -3,4 +3,4 @@ from .story import story from .reel import reel from . import chat -from . import livecode \ No newline at end of file +from . import livecode diff --git a/app/scenarios/dialogue.py b/app/scenarios/dialogue.py index 3179f7a..920de6a 100644 --- a/app/scenarios/dialogue.py +++ b/app/scenarios/dialogue.py @@ -8,8 +8,7 @@ def dialogue(request: DialogueRequest): params = {"temperature": 1.0, "max_tokens": 1000, **request.params} characters = [ - get_character_data(character_id) - for character_id in request.character_ids + get_character_data(character_id) for character_id in request.character_ids ] llms = [] @@ -23,7 +22,7 @@ def dialogue(request: DialogueRequest): prompt=request.prompt, ) llms.append( - LLM(model=request.model, system_message=system_message, params=params) + LLM(model=request.model, system_message=system_message, params=params), ) message = "You are beginning the conversation. What is the first thing you say? Just the line. No quotes, no name markers." 
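
Since `dialogue()` above is exposed as `POST /scenarios/dialogue` in `app/server.py` (see the routes later in this diff), the endpoint can be exercised the same way the existing tests do. The sketch below assumes a FastAPI `TestClient` around `app.server:app`; the character ids are placeholders, and `DialogueRequest` may define defaults for fields not listed here.

```python
# Hedged sketch, modelled on the tests in this diff: call the dialogue scenario
# over the FastAPI app. The route path and the request fields used by dialogue()
# (character_ids, prompt, model, params) come from this diff; the ids are placeholders.
from fastapi.testclient import TestClient
from app.server import app

client = TestClient(app)

request = {
    "character_ids": ["<character-id-1>", "<character-id-2>"],  # placeholder ids
    "prompt": "Debate whether Mars colonies should be domed or underground.",
    "model": "gpt-3.5-turbo",  # hypothetical choice
    "params": {"temperature": 1.0, "max_tokens": 1000},
}

response = client.post("/scenarios/dialogue", json=request)
response.raise_for_status()

# dialogue() alternates speakers and returns DialogueResult(dialogue=[...]),
# where each entry is {"character_id": ..., "message": ...}.
for turn in response.json()["dialogue"]:
    print(turn["character_id"], ":", turn["message"])
```
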
@@ -37,10 +36,9 @@ def dialogue(request: DialogueRequest): if not message: raise Exception("No response from character") - conversation.append({ - "character_id": request.character_ids[m % 2], - "message": message - }) + conversation.append( + {"character_id": request.character_ids[m % 2], "message": message}, + ) result = DialogueResult(dialogue=conversation) diff --git a/app/scenarios/livecode.py b/app/scenarios/livecode.py index 7fe9e79..519c137 100644 --- a/app/scenarios/livecode.py +++ b/app/scenarios/livecode.py @@ -3,12 +3,13 @@ livecoder = LiveCoder() + def code(request: LiveCodeRequest): - + # message = { # "message": request.message, # "attachments": request.attachments, # } response = livecoder(request.message, session_id=request.session_id) - return response \ No newline at end of file + return response diff --git a/app/scenarios/monologue.py b/app/scenarios/monologue.py index 7d3fa2e..2bc8b9d 100644 --- a/app/scenarios/monologue.py +++ b/app/scenarios/monologue.py @@ -10,15 +10,11 @@ def monologue(request: MonologueRequest) -> MonologueResult: character_data = get_character_data(request.character_id) name = character_data.get("name") description = character_data.get("logosData").get("identity") - - system_message = monologue_template.substitute( - name=name, - description=description - ) + + system_message = monologue_template.substitute(name=name, description=description) llm = LLM(model=request.model, system_message=system_message, params=params) monologue_text = llm(request.prompt, image=request.init_image) result = MonologueResult(monologue=monologue_text) return result - diff --git a/app/scenarios/reel.py b/app/scenarios/reel.py index bff271c..6194927 100644 --- a/app/scenarios/reel.py +++ b/app/scenarios/reel.py @@ -5,11 +5,7 @@ from ..mongo import get_character_data from ..llm import LLM from ..character import EdenCharacter -from ..models import ( - ReelNarrationMode, - ReelRequest, - ReelResult -) +from ..models import ReelNarrationMode, ReelRequest, ReelResult from ..prompt_templates.cinema import ( reelwriter_system_template, reelwriter_prompt_template, @@ -27,12 +23,16 @@ def reel(request: ReelRequest): character = EdenCharacter(character_id) character_names.append(character.name) character_details += character.card() - + reel_prompt = request.prompt if request.narration == ReelNarrationMode.on: - reel_prompt += f"\n\nThe user has requested there should be a narrated voiceover." + reel_prompt += ( + f"\n\nThe user has requested there should be a narrated voiceover." + ) elif request.narration == ReelNarrationMode.off: - reel_prompt += f"\n\nThe user has requested there should be **NO** narrated voiceover." + reel_prompt += ( + f"\n\nThe user has requested there should be **NO** narrated voiceover." 
+ ) if character_details: character_details = f"Characters:\n{character_details}\n\nCharacter names (only use these for character field in each clip):\n{', '.join(character_names)}\n---\n\n" @@ -47,19 +47,19 @@ def reel(request: ReelRequest): system_message=reelwriter_system_template.template, params=params, ) - + reel_result = reelwriter(prompt, output_schema=ReelResult) - + if request.narration == ReelNarrationMode.on: - reel_result['voiceover'] = "narrator" - reel_result['character'] = request.narrator_id + reel_result["voiceover"] = "narrator" + reel_result["character"] = request.narrator_id elif request.narration == ReelNarrationMode.off: - reel_result['voiceover'] = "none" - reel_result['character'] = None - reel_result['speech'] = None + reel_result["voiceover"] = "none" + reel_result["character"] = None + reel_result["speech"] = None if request.music_prompt: - reel_result['music_prompt'] = request.music_prompt + reel_result["music_prompt"] = request.music_prompt print("===== generate a reel =======") print(prompt) diff --git a/app/scenarios/story.py b/app/scenarios/story.py index 8a8e437..b1995d3 100644 --- a/app/scenarios/story.py +++ b/app/scenarios/story.py @@ -5,11 +5,7 @@ from ..mongo import get_character_data from ..llm import LLM from ..character import EdenCharacter -from ..models import ( - StoryRequest, - StoryClip, - StoryResult -) +from ..models import StoryRequest, StoryClip, StoryResult from ..prompt_templates.cinema import ( screenwriter_system_template, screenwriter_prompt_template, @@ -27,7 +23,7 @@ def story(request: StoryRequest): character = EdenCharacter(character_id) character_names.append(character.name) character_details += character.card() - + story_prompt = request.prompt if character_details: character_details = f"Characters:\n{character_details}\n\nCharacter names (only use these for character field in each clip):\n{', '.join(character_names)}\n---\n\n" @@ -42,7 +38,7 @@ def story(request: StoryRequest): system_message=screenwriter_system_template.template, params=params, ) - + story = screenwriter(prompt, output_schema=StoryResult) if request.music: @@ -50,7 +46,7 @@ def story(request: StoryRequest): story["music_prompt"] = request.music_prompt # override else: story["music_prompt"] = None - + print("===== generate a story =======") print(prompt) print("-----") diff --git a/app/scenarios/tasks.py b/app/scenarios/tasks.py index 06883b7..08faba6 100644 --- a/app/scenarios/tasks.py +++ b/app/scenarios/tasks.py @@ -4,10 +4,10 @@ from ..llm import LLM from ..prompt_templates import summary_template, moderation_template from ..models import ( - SummaryRequest, - ModerationRequest, + SummaryRequest, + ModerationRequest, ModerationResult, - SimpleAssistantRequest + SimpleAssistantRequest, ) @@ -36,10 +36,10 @@ def moderation(request: ModerationRequest) -> ModerationResult: def general_assistant(request: SimpleAssistantRequest) -> str: params = {"temperature": 0.0, "max_tokens": 1000, **request.params} - + llm = LLM(model=request.model, params=params) result = llm(request.prompt, output_schema=request.output_schema) - + if request.output_schema is not None: return result[request.output_schema.__name__] else: diff --git a/app/server.py b/app/server.py index a4bc812..3a3d984 100644 --- a/app/server.py +++ b/app/server.py @@ -4,26 +4,26 @@ import logging from .scenarios import ( - monologue, - dialogue, - story, - chat, + monologue, + dialogue, + story, + chat, livecode, tasks, ) from .animations import ( - animated_monologue, - animated_dialogue, + 
animated_monologue, + animated_dialogue, animated_story, animated_reel, illustrated_comic, little_martian_poster, ) from .creation_interfaces import ( - kojii_makeitrad, + kojii_makeitrad, kojii_chebel, - kojii_untitledxyz, - kojii_violetforest, + kojii_untitledxyz, + kojii_violetforest, kojii_huemin, ) from .generator import generate_task @@ -32,6 +32,7 @@ app = FastAPI() router = APIRouter() + @app.exception_handler(Exception) def exception_handler(request: Request, exc: Exception): logging.error(f"Error: {exc}\n{traceback.format_exc()}") @@ -40,23 +41,52 @@ def exception_handler(request: Request, exc: Exception): content={"message": f"Error: {exc}"}, ) + # Scenarios router.add_api_route(path="/scenarios/monologue", endpoint=monologue, methods=["POST"]) router.add_api_route(path="/scenarios/dialogue", endpoint=dialogue, methods=["POST"]) router.add_api_route(path="/scenarios/story", endpoint=story, methods=["POST"]) # Animations/DAGs -router.add_api_route(path="/animation/monologue", endpoint=animated_monologue, methods=["POST"]) -router.add_api_route(path="/animation/dialogue", endpoint=animated_dialogue, methods=["POST"]) +router.add_api_route( + path="/animation/monologue", + endpoint=animated_monologue, + methods=["POST"], +) +router.add_api_route( + path="/animation/dialogue", + endpoint=animated_dialogue, + methods=["POST"], +) router.add_api_route(path="/animation/story", endpoint=animated_story, methods=["POST"]) router.add_api_route(path="/animation/reel", endpoint=animated_reel, methods=["POST"]) -router.add_api_route(path="/animation/comic", endpoint=illustrated_comic, methods=["POST"]) +router.add_api_route( + path="/animation/comic", + endpoint=illustrated_comic, + methods=["POST"], +) -router.add_api_route(path="/kojii/makeitrad", endpoint=kojii_makeitrad, methods=["POST"]) -router.add_api_route(path="/kojii/va2rosa", endpoint=little_martian_poster, methods=["POST"]) +router.add_api_route( + path="/kojii/makeitrad", + endpoint=kojii_makeitrad, + methods=["POST"], +) +router.add_api_route( + path="/kojii/va2rosa", + endpoint=little_martian_poster, + methods=["POST"], +) router.add_api_route(path="/kojii/chebel", endpoint=kojii_chebel, methods=["POST"]) -router.add_api_route(path="/kojii/untitledxyz", endpoint=kojii_untitledxyz, methods=["POST"]) -router.add_api_route(path="/kojii/violetforest", endpoint=kojii_violetforest, methods=["POST"]) +router.add_api_route( + path="/kojii/untitledxyz", + endpoint=kojii_untitledxyz, + methods=["POST"], +) +router.add_api_route( + path="/kojii/violetforest", + endpoint=kojii_violetforest, + methods=["POST"], +) router.add_api_route(path="/kojii/huemin", endpoint=kojii_huemin, methods=["POST"]) # Chat @@ -72,12 +102,16 @@ def exception_handler(request: Request, exc: Exception): # Tasks router.add_api_route(path="/tasks/summary", endpoint=tasks.summary, methods=["POST"]) -router.add_api_route(path="/tasks/moderation", endpoint=tasks.moderation, methods=["POST"]) +router.add_api_route( + path="/tasks/moderation", + endpoint=tasks.moderation, + methods=["POST"], +) app.include_router(router) + @app.get("/") def main(): return {"status": "running"} - diff --git a/app/utils.py b/app/utils.py index b470c0c..9965aac 100644 --- a/app/utils.py +++ b/app/utils.py @@ -22,7 +22,16 @@ def get_video_duration(video_file): - cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', video_file] + cmd = [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + 
"default=noprint_wrappers=1:nokey=1", + video_file, + ] duration = subprocess.check_output(cmd).decode().strip() return float(duration) @@ -39,7 +48,7 @@ def create_dynamic_model(model_name: str, model_values: list): ModelEnum = Enum(model_name, {value: value for value in model_values}) DynamicModel = create_model( model_name, - **{model_name.lower(): (ModelEnum, Field(description=model_name))} + **{model_name.lower(): (ModelEnum, Field(description=model_name))}, ) DynamicModel.__doc__ = model_name return DynamicModel @@ -94,8 +103,8 @@ def url_to_image_data(url, max_size=(512, 512)): def calculate_target_dimensions(images, max_pixels): - min_w = float('inf') - min_h = float('inf') + min_w = float("inf") + min_h = float("inf") total_aspect_ratio = 0.0 @@ -114,7 +123,7 @@ def calculate_target_dimensions(images, max_pixels): else: target_width = min_w target_height = round(target_width / avg_aspect_ratio) - + if target_width * target_height > max_pixels: ratio = (target_width * target_height) / max_pixels ratio = math.sqrt((target_width * target_height) / max_pixels) @@ -123,7 +132,7 @@ def calculate_target_dimensions(images, max_pixels): target_width -= target_width % 2 target_height -= target_height % 2 - + return target_width, target_height @@ -160,14 +169,14 @@ def create_dialogue_thumbnail(image1_url, image2_url, width, height, ext="WEBP") image1 = resize_and_crop(image1, half_width, height) image2 = resize_and_crop(image2, half_width, height) - combined_image = Image.new('RGB', (width, height)) + combined_image = Image.new("RGB", (width, height)) combined_image.paste(image1, (0, 0)) combined_image.paste(image2, (half_width, 0)) img_byte_arr = BytesIO() combined_image.save(img_byte_arr, format=ext) - + return img_byte_arr.getvalue() @@ -176,15 +185,42 @@ def concatenate_videos(video_files, output_file, fps=30): for i, video in enumerate(video_files): with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp: output_video = temp.name - convert_command = ['ffmpeg', '-y', '-loglevel', 'panic', '-i', video, '-r', str(fps), '-c:a', 'copy', output_video] + convert_command = [ + "ffmpeg", + "-y", + "-loglevel", + "panic", + "-i", + video, + "-r", + str(fps), + "-c:a", + "copy", + output_video, + ] subprocess.run(convert_command) converted_videos.append(output_video) - filter_complex = "".join([f"[{i}:v] [{i}:a] " for i in range(len(converted_videos))]) + filter_complex = "".join( + [f"[{i}:v] [{i}:a] " for i in range(len(converted_videos))], + ) filter_complex += f"concat=n={len(converted_videos)}:v=1:a=1 [v] [a]" - concat_command = ['ffmpeg'] + concat_command = ["ffmpeg"] for video in converted_videos: - concat_command.extend(['-i', video]) - concat_command.extend(['-y', '-loglevel', 'panic', '-filter_complex', filter_complex, '-map', '[v]', '-map', '[a]', output_file]) + concat_command.extend(["-i", video]) + concat_command.extend( + [ + "-y", + "-loglevel", + "panic", + "-filter_complex", + filter_complex, + "-map", + "[v]", + "-map", + "[a]", + output_file, + ], + ) subprocess.run(concat_command) for video in converted_videos: os.remove(video) @@ -192,16 +228,23 @@ def concatenate_videos(video_files, output_file, fps=30): def mix_video_audio(video_path, audio_path, output_path): cmd = [ - 'ffmpeg', - '-y', - '-i', video_path, - '-i', audio_path, - '-filter_complex', '[1:a]volume=1.0[a1];[0:a][a1]amerge=inputs=2[a]', - '-map', '0:v', - '-map', '[a]', - '-c:v', 'copy', - '-ac', '2', - output_path + "ffmpeg", + "-y", + "-i", + video_path, + "-i", + audio_path, + "-filter_complex", 
+ "[1:a]volume=1.0[a1];[0:a][a1]amerge=inputs=2[a]", + "-map", + "0:v", + "-map", + "[a]", + "-c:v", + "copy", + "-ac", + "2", + output_path, ] print(cmd) subprocess.run(cmd, check=True) @@ -212,20 +255,60 @@ def combine_audio_video(audio_url: str, video_url: str): video_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=True) output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) - subprocess.run(['wget', '-nv', '-O', audio_file.name, audio_url]) - subprocess.run(['wget', '-nv', '-O', video_file.name, video_url]) + subprocess.run(["wget", "-nv", "-O", audio_file.name, audio_url]) + subprocess.run(["wget", "-nv", "-O", video_file.name, video_url]) # get the duration of the audio file - cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', audio_file.name] + cmd = [ + "ffprobe", + "-v", + "error", + "-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + audio_file.name, + ] audio_duration = subprocess.check_output(cmd).decode().strip() # loop the video looped_video = tempfile.NamedTemporaryFile(suffix=".mp4", delete=True) - cmd = ['ffmpeg', '-y', '-loglevel', 'panic', '-stream_loop', '-1', '-i', video_file.name, '-c', 'copy', '-t', audio_duration, looped_video.name] + cmd = [ + "ffmpeg", + "-y", + "-loglevel", + "panic", + "-stream_loop", + "-1", + "-i", + video_file.name, + "-c", + "copy", + "-t", + audio_duration, + looped_video.name, + ] subprocess.run(cmd) # merge the audio and the looped video - cmd = ['ffmpeg', '-y', '-loglevel', 'panic', '-i', looped_video.name, '-i', audio_file.name, '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', '-shortest', output_file.name] + cmd = [ + "ffmpeg", + "-y", + "-loglevel", + "panic", + "-i", + looped_video.name, + "-i", + audio_file.name, + "-c:v", + "copy", + "-c:a", + "aac", + "-strict", + "experimental", + "-shortest", + output_file.name, + ] subprocess.run(cmd) return output_file.name @@ -238,19 +321,35 @@ def stitch_image_video(image_file: str, video_file: str, image_left: bool = Fals filter_complex = '"[1:v][0:v]scale2ref[img][vid];[img]setpts=PTS-STARTPTS[imgp];[vid]setpts=PTS-STARTPTS[vidp];[imgp][vidp]hstack"' else: filter_complex = '"[0:v][1:v]scale2ref[vid][img];[vid]setpts=PTS-STARTPTS[vidp];[img]setpts=PTS-STARTPTS[imgp];[vidp][imgp]hstack"' - - cmd = ['ffmpeg', '-y', '-loglevel', 'panic', '-i', video_file, '-i', image_file, '-filter_complex', filter_complex, '-c:v', 'libx264', '-pix_fmt', 'yuv420p', output_file.name] - #subprocess.run(cmd) + cmd = [ + "ffmpeg", + "-y", + "-loglevel", + "panic", + "-i", + video_file, + "-i", + image_file, + "-filter_complex", + filter_complex, + "-c:v", + "libx264", + "-pix_fmt", + "yuv420p", + output_file.name, + ] + + # subprocess.run(cmd) os.system(" ".join(cmd)) - + return output_file.name def exponential_backoff( - func, - max_attempts=5, - initial_delay=1, + func, + max_attempts=5, + initial_delay=1, ): delay = initial_delay for attempt in range(1, max_attempts + 1): @@ -264,13 +363,12 @@ def exponential_backoff( delay = delay * 2 -def process_in_parallel( - array, - func, - max_workers=3 -): +def process_in_parallel(array, func, max_workers=3): with ThreadPoolExecutor(max_workers=max_workers) as executor: - futures = {executor.submit(func, item, index): index for index, item in enumerate(array)} + futures = { + executor.submit(func, item, index): index + for index, item in enumerate(array) + } results = [None] * len(array) for future in as_completed(futures): try: @@ 
-298,34 +396,34 @@ def wrap_text(draw, text, font, max_width): lines = [] current_line = [] for word in words: - if draw.textlength(' '.join(current_line + [word]), font=font) > max_width: - lines.append(' '.join(current_line)) + if draw.textlength(" ".join(current_line + [word]), font=font) > max_width: + lines.append(" ".join(current_line)) current_line = [word] else: current_line.append(word) if current_line: - lines.append(' '.join(current_line)) + lines.append(" ".join(current_line)) return lines def video_textbox( paragraphs: list[str], - width: int, - height: int, + width: int, + height: int, duration: float, fade_in: float, - font_size: int = 36, + font_size: int = 36, font_ttf: str = "Arial.ttf", margin_left: int = 25, margin_right: int = 25, - line_spacing: float = 1.25 + line_spacing: float = 1.25, ): font = get_font(font_ttf, font_size) - canvas = Image.new('RGB', (width, height)) + canvas = Image.new("RGB", (width, height)) draw = ImageDraw.Draw(canvas) - draw.rectangle([(0, 0), (width, height)], fill='black') + draw.rectangle([(0, 0), (width, height)], fill="black") y = 100 for text in paragraphs: @@ -344,6 +442,6 @@ def video_textbox( clip = clip.set_audio(silent_audio) output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) - clip.write_videofile(output_file.name, fps=30, codec='libx264', audio_codec='aac') + clip.write_videofile(output_file.name, fps=30, codec="libx264", audio_codec="aac") - return output_file.name \ No newline at end of file + return output_file.name diff --git a/requirements-dev.lock b/requirements-dev.lock index 3be8687..9930923 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -7,6 +7,7 @@ # all-features: false -e file:. +aiofiles==23.2.1 annotated-types==0.6.0 anyio==3.7.1 asttokens==2.4.1 @@ -30,7 +31,7 @@ flake8==6.1.0 fsspec==2024.2.0 h11==0.14.0 httpcore==1.0.2 -httpx==0.25.2 +httpx==0.27.0 huggingface-hub==0.20.3 identify==2.5.33 idna==3.6 diff --git a/requirements.lock b/requirements.lock index 220e362..1fe6e1c 100644 --- a/requirements.lock +++ b/requirements.lock @@ -7,6 +7,7 @@ # all-features: false -e file:. 
+aiofiles==23.2.1 annotated-types==0.6.0 anyio==3.7.1 asttokens==2.4.1 @@ -26,7 +27,7 @@ fire==0.5.0 fsspec==2024.2.0 h11==0.14.0 httpcore==1.0.2 -httpx==0.25.2 +httpx==0.27.0 huggingface-hub==0.20.3 idna==3.6 imageio==2.33.1 diff --git a/scripts/moderation.py b/scripts/moderation.py index 91d7e72..31acdc9 100644 --- a/scripts/moderation.py +++ b/scripts/moderation.py @@ -1,6 +1,7 @@ import sys import os -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) import os import pymongo @@ -24,7 +25,7 @@ def iterate_collection(collection_name, callback, batch_size=100): docs = db[collection_name].find().batch_size(batch_size) - try: + try: for doc in docs: callback(doc) finally: @@ -32,30 +33,20 @@ def iterate_collection(collection_name, callback, batch_size=100): def moderation(): - def process_creation(creation): - if 'task' not in creation: - return - task = db["tasks"].find_one({ - "_id": ObjectId(creation['task']) - }) - if 'text_input' not in task['config']: + if "task" not in creation: + return + task = db["tasks"].find_one({"_id": ObjectId(creation["task"])}) + if "text_input" not in task["config"]: return - text_input = task['config']['text_input'] + text_input = task["config"]["text_input"] print(text_input) - request = { - "text": text_input - } + request = {"text": text_input} response = client.post("/tasks/moderation", json=request) print(response.json()) print("-----") - - iterate_collection( - "creations", - process_creation - ) - + iterate_collection("creations", process_creation) -moderation() \ No newline at end of file +moderation() diff --git a/tests/test.py b/tests/test.py index 9c97dcc..c346ec5 100644 --- a/tests/test.py +++ b/tests/test.py @@ -35,21 +35,19 @@ # print(voice_id) - - - class Country(Enum): france = "France" germany = "Germany" switzerland = "Switzerland" usa = "USA" japanasia = "Japan Asia" - + class CountrySelection(BaseModel): """ Country """ + country: Country = Field(description="Country") @@ -59,19 +57,19 @@ def test_func2(): """ prompt = "What country is this city in? Tokyo" - country = create_dynamic_model("country", ["France", "Germany", "Switzerland", "USA", "Japan Asia ok yay"]) + country = create_dynamic_model( + "country", + ["France", "Germany", "Switzerland", "USA", "Japan Asia ok yay"], + ) request = SimpleAssistantRequest( prompt=prompt, model="gpt-3.5-turbo", params={"temperature": 0.0, "max_tokens": 10}, - output_schema=country + output_schema=country, ) result = general_assistant(request) print(type(result)) - print(result) - - diff --git a/tests/test_animation.py b/tests/test_animation.py index 144b724..fe3b8d8 100644 --- a/tests/test_animation.py +++ b/tests/test_animation.py @@ -9,12 +9,12 @@ def test_monologue_animation(): Test monologue on static character and prompt """ request = { - "character_id": "65f35e7f44390ad1df63680e", + "character_id": "65f35e7f44390ad1df63680e", "prompt": "Tell me what you feel about Bombay Beach", "gfpgan": True, - "intro_screen": True + "intro_screen": True, } - + response = client.post("/animation/monologue", json=request) print(response.json()) assert response.status_code == 200 @@ -25,8 +25,8 @@ def test_dialogue_animation(): Test monologue on static character and prompt """ request = { - #"character_ids": ["6596129023f1c4b471dbb94a", "6598e117dd06d165264f2277"], - #"prompt": "Debate panspermia vs. 
abiogenesis", + # "character_ids": ["6596129023f1c4b471dbb94a", "6598e117dd06d165264f2277"], + # "prompt": "Debate panspermia vs. abiogenesis", "prompt": "Have a rap battle!! Dis each other. Be expressive in your speech! Exclamations, random ALL CAPS, onomatopoeia, etc. Be creative but sharp. Dis each other! You are trying to win this rap battle against each other.", "character_ids": ["65ce8995b6124cd312fedb99", "65eea28c730ba08c8c7b6810"], "gfpgan": True, @@ -48,9 +48,9 @@ def test_story(): "prompt": "A family of Dragons lives in a mystical layer underneath a volcano. The dragons are beautiful, ornately decorated, fire-breathing, creatures. They are brave and wise. The story should just be about them journeying to a bunch of beautiful far away places in nature, and then coming back to their volcano lair. Make sure the image prompts are very short. No more than 5 words.", "intro_screen": True, # "music_prompt": "a long drum solo with percussions and bongos", - "music": True + "music": True, } - + response = client.post("/animation/story", json=request) print(response.json()) assert response.status_code == 200 @@ -62,13 +62,13 @@ def test_reel(): """ request = { "character_ids": [], - #"prompt": "A jazz woman dancing to some saxophone jazzy show tunes, instrumental", + # "prompt": "A jazz woman dancing to some saxophone jazzy show tunes, instrumental", "prompt": "A long commercial about a drug called Paradisium. explain its benefits and side effects, and go on and on and on.", "intro_screen": True, - #"narration": "off", - #"music_prompt": "death metal heavy rock, incomprehensible, gore, screen" + # "narration": "off", + # "music_prompt": "death metal heavy rock, incomprehensible, gore, screen" } - + response = client.post("/animation/reel", json=request) print(response.json()) - assert response.status_code == 200 \ No newline at end of file + assert response.status_code == 200 diff --git a/tests/test_chat.py b/tests/test_chat.py index 37c4f41..aef25cf 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -31,7 +31,7 @@ def test_character_chat(): "message": "Who are you?", "attachments": None, "knowledge_summary": None, - "knowledge": None + "knowledge": None, } response = client.post("/chat/test", json=request) diff --git a/tests/test_comic.py b/tests/test_comic.py index 93c14c1..318e40b 100644 --- a/tests/test_comic.py +++ b/tests/test_comic.py @@ -25,8 +25,8 @@ def test_comic(): Test comic book story """ request = { - "character_id": "658b44b36104b05b266ca3c6", - "prompt": "Tell me a story about pizza. Have exactly 3 panels." + "character_id": "658b44b36104b05b266ca3c6", + "prompt": "Tell me a story about pizza. 
Have exactly 3 panels.", } response = client.post("/scenarios/comic", json=request) diff --git a/tests/test_creation_interfaces.py b/tests/test_creation_interfaces.py index 7a83cc9..5f05f76 100644 --- a/tests/test_creation_interfaces.py +++ b/tests/test_creation_interfaces.py @@ -12,12 +12,24 @@ def test_kojii_makeitrad(): request = { "setting": random.choice(["inside", "outside"]), - "location": random.choice(["jungle", "cliff front", "desert", "redwood forest", "city suburbia", "montana mountains", "green hills"]), + "location": random.choice( + [ + "jungle", + "cliff front", + "desert", + "redwood forest", + "city suburbia", + "montana mountains", + "green hills", + ], + ), "time": random.choice(["noon", "dawn", "red sunset", "night"]), - "color": random.choice(["default", "orange", "yellow/green", "light blue", "light pink"]), + "color": random.choice( + ["default", "orange", "yellow/green", "light blue", "light pink"], + ), "clouds": random.choice([True, False]), "pool": random.choice([True, False]), - "aspect_ratio": random.choice(["portrait", "landscape", "square"]) + "aspect_ratio": random.choice(["portrait", "landscape", "square"]), } response = client.post("/kojii/makeitrad", json=request) print(response.json()) @@ -34,7 +46,7 @@ def test_kojii_chebel(): "number": random.choice(["one", "many"]), "aspect_ratio": random.choice(["portrait", "landscape"]), "abstract": random.uniform(0, 1), - "color": random.choice(["color", "black and white"]) + "color": random.choice(["color", "black and white"]), } response = client.post("/kojii/chebel", json=request) print(response.json()) @@ -49,7 +61,7 @@ def test_kojii_untitledxyz(): request = { "type": random.choice(["column", "context"]), - "human_machine_nature": random.uniform(0, 1) + "human_machine_nature": random.uniform(0, 1), } response = client.post("/kojii/untitledxyz", json=request) @@ -65,7 +77,7 @@ def test_kojii_violetforest(): request = { "cybertwee_cyberpunk": random.uniform(0, 1), - "style": random.choice(["Kawaii", "Stars", "Lace", "Flowers"]) + "style": random.choice(["Kawaii", "Stars", "Lace", "Flowers"]), } response = client.post("/kojii/violetforest", json=request) @@ -80,9 +92,83 @@ def test_kojii_huemin(): """ request = { - "climate": random.choice(["arid", "temperate", "tropical", "alpine", "cold", "warm", "humid", "dry", "mediterranean", "oceanic", "continental", "polar", "subtropical", "desert", "savanna", "rainforest", "tundra", "monsoon", "steppe"]), - "landform": random.choice(["mountains", "valleys", "plateaus", "hills", "plains", "dunes", "canyons", "cliffs", "caves", "volcanoes", "rivers", "lakes", "glaciers", "fjords", "deltas", "estuaries", "wetlands", "deserts", "craters", "atolls", "peninsula", "islands", "basins", "gorges", "waterfalls", "rift valleys"]), - "body_of_water": random.choice(["oceans", "seas", "rivers", "lakes", "ponds", "streams", "creeks", "estuaries", "fjords", "bays", "gulfs", "lagoons", "marshes", "swamps", "reservoirs", "waterfalls", "glacial lakes", "wetlands", "springs", "brooks"]), + "climate": random.choice( + [ + "arid", + "temperate", + "tropical", + "alpine", + "cold", + "warm", + "humid", + "dry", + "mediterranean", + "oceanic", + "continental", + "polar", + "subtropical", + "desert", + "savanna", + "rainforest", + "tundra", + "monsoon", + "steppe", + ], + ), + "landform": random.choice( + [ + "mountains", + "valleys", + "plateaus", + "hills", + "plains", + "dunes", + "canyons", + "cliffs", + "caves", + "volcanoes", + "rivers", + "lakes", + "glaciers", + "fjords", + "deltas", + 
"estuaries", + "wetlands", + "deserts", + "craters", + "atolls", + "peninsula", + "islands", + "basins", + "gorges", + "waterfalls", + "rift valleys", + ], + ), + "body_of_water": random.choice( + [ + "oceans", + "seas", + "rivers", + "lakes", + "ponds", + "streams", + "creeks", + "estuaries", + "fjords", + "bays", + "gulfs", + "lagoons", + "marshes", + "swamps", + "reservoirs", + "waterfalls", + "glacial lakes", + "wetlands", + "springs", + "brooks", + ], + ), # "structure": random.choice(["bridges", "tunnels", "dams", "skyscrapers", "castles", "temples", "churches", "mosques", "fortresses", "monuments", "statues", "towers", "silos", "industrial factories", "piers", "harbors"]), # "season": random.choice(["spring", "summer", "autumn", "winter", "rainy", "sunny", "cloudy", "stormy clouds", "foggy mist", "snowy", "windy", "humid", "dry", "hot", "cold", "mild", "freezing", "thunderstorms", "hail", "sleet", "blizzard", "heatwave", "drought"]), # "time_of_day": random.choice(["", "dawn", "morning", "noon", "afternoon", "dusk", "evening", "sunset"]), @@ -92,4 +178,4 @@ def test_kojii_huemin(): response = client.post("/kojii/huemin", json=request) print(response.json()) - assert response.status_code == 200 \ No newline at end of file + assert response.status_code == 200 diff --git a/tests/test_generator.py b/tests/test_generator.py index ab9a1a4..e6f490d 100644 --- a/tests/test_generator.py +++ b/tests/test_generator.py @@ -8,7 +8,7 @@ # """ # Test making a generator request for a monologue # """ - + # request = { # "generatorName": "monologue", # "config": { @@ -27,7 +27,7 @@ # """ # Test making a generator request for a monologue # """ - + # request = { # "generatorName": "dialogue", # "config": { @@ -46,7 +46,7 @@ # """ # Test making a generator request for stories # """ - + # request = { # "generatorName": "story", # "config": { @@ -61,12 +61,11 @@ # assert response.status_code == 200 - def test_littlemartians(): """ Test making a generator request for Little Martians posters """ - + request = { "generatorName": "littlemartians", "config": { @@ -75,7 +74,7 @@ def test_littlemartians(): "setting": "Human Imaginarium", "aspect_ratio": "portrait", "prompt": "Verdelis won the lottery", - } + }, } response = client.post("/tasks/create", json=request) diff --git a/tests/test_littlemartians.py b/tests/test_littlemartians.py index a8504ad..63b5522 100644 --- a/tests/test_littlemartians.py +++ b/tests/test_littlemartians.py @@ -4,6 +4,7 @@ client = TestClient(app) + def test_little_martians(): """ Test Little Martian illustration on static character and prompt @@ -16,8 +17,8 @@ def test_little_martians(): genres = ["Drama", "Comedy", "Horror", "Action", "Mystery"] aspect_ratios = ["portrait", "landscape", "square"] prompts = [ - "is feeling mischievous", - "is doing high end machine learning research on finetuning multimodal diffusion models", + "is feeling mischievous", + "is doing high end machine learning research on finetuning multimodal diffusion models", "feels lonely", "just won the lottery", "is feeling nostalgic", @@ -61,11 +62,11 @@ def test_little_martians(): import requests from io import BytesIO from PIL import Image + uri, _ = response.json() response = requests.get(uri) img = Image.open(BytesIO(response.content)) - filename = f'{martian} - {setting} - {genre} - {prompt[0:30]}' - img.save(f'tests/martians/{filename}.jpg') - + filename = f"{martian} - {setting} - {genre} - {prompt[0:30]}" + img.save(f"tests/martians/{filename}.jpg") assert response.status_code == 200 diff --git 
a/tests/test_providers.py b/tests/test_providers.py index ca9421e..e06d177 100644 --- a/tests/test_providers.py +++ b/tests/test_providers.py @@ -1,5 +1,6 @@ from app.plugins import replicate, elevenlabs, s3 + def test_elevenlabs(): """ Test Elevenlabs API @@ -10,5 +11,3 @@ def test_elevenlabs(): audio_bytes = elevenlabs.tts(text, voice) assert len(audio_bytes) > 0 - - diff --git a/tests/test_scenarios.py b/tests/test_scenarios.py index f3269df..15033e3 100644 --- a/tests/test_scenarios.py +++ b/tests/test_scenarios.py @@ -9,9 +9,9 @@ def test_monologue(): Test monologue on static character and prompt """ request = { - "character_id": "6596129023f1c4b471dbb94a", + "character_id": "6596129023f1c4b471dbb94a", "prompt": "What does the image say", - #"init_image": "https://images.squarespace-cdn.com/content/v1/6213c340453c3f502425776e/c24904d4-f0f0-4a26-9470-fec227dde15c/image-90.png" + # "init_image": "https://images.squarespace-cdn.com/content/v1/6213c340453c3f502425776e/c24904d4-f0f0-4a26-9470-fec227dde15c/image-90.png" } response = client.post("/scenarios/monologue", json=request) @@ -26,7 +26,7 @@ def test_dialogue(): """ request = { "character_ids": ["6596129023f1c4b471dbb94a", "6598e117dd06d165264f2277"], - "prompt": "Debate whether or not pizza is a vegetable once and for all" + "prompt": "Debate whether or not pizza is a vegetable once and for all", } response = client.post("/scenarios/dialogue", json=request) @@ -42,7 +42,7 @@ def test_story(): request = { "character_ids": ["6596129023f1c4b471dbb94a", "6598e117dd06d165264f2277"], "prompt": "Debate whether or not pizza is a vegetable", - "music": True + "music": True, } response = client.post("/scenarios/story", json=request) @@ -51,13 +51,12 @@ def test_story(): assert response.status_code == 200 - # def test_comic(): # """ # Test dialogue function on static characters and prompt # """ # request = { -# "character_id": "658b44b36104b05b266ca3c6", #"658b481a6104b05b266eaed6", #"658b44b36104b05b266ca3c6", # "658b481a6104b05b266eaed6"], +# "character_id": "658b44b36104b05b266ca3c6", #"658b481a6104b05b266eaed6", #"658b44b36104b05b266ca3c6", # "658b481a6104b05b266eaed6"], # "prompt": "Debate whether or not pizza is a vegetable" # } diff --git a/tests/test_stories.py b/tests/test_stories.py index 65ca465..308fccb 100644 --- a/tests/test_stories.py +++ b/tests/test_stories.py @@ -10,9 +10,14 @@ def test_story_characters(): """ request = { - "character_ids": ["6596129023f1c4b471dbb94a", "6598e117dd06d165264f2277", "6598e103dd06d165264f2247", "6598ee16dd06d16526503ce7"], + "character_ids": [ + "6596129023f1c4b471dbb94a", + "6598e117dd06d165264f2277", + "6598e103dd06d165264f2247", + "6598ee16dd06d16526503ce7", + ], "prompt": "You are members of an elite space exploration team, encountering and interpreting alien forms of art and communication.", - "intro_screen": True + "intro_screen": True, } response = client.post("/animation/story", json=request) @@ -30,7 +35,7 @@ def test_story(): "character_ids": [], "prompt": "A family of Dragons lives in a mystical layer underneath a volcano. The dragons are beautiful, ornately decorated, fire-breathing, creatures. They are brave and wise. 
The story should just be about them journeying to a bunch of beautiful far away places in nature, and then coming back to their volcano lair.", "intro_screen": True, - "music": True, + "music": True, } response = client.post("/animation/story", json=request) diff --git a/tests/test_tasks.py b/tests/test_tasks.py index 5a52b39..d65cf6c 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -9,11 +9,10 @@ def test_summary(): Test summarization of a document """ from app.prompt_templates.assistant import creator_template + text = creator_template.substitute(name="Eden", identity="") - - request = { - "text": text - } + + request = {"text": text} response = client.post("/tasks/summary", json=request) print(response.json()) @@ -24,10 +23,8 @@ def test_summary(): def test_moderation(): """ Test moderation of some text - """ - request = { - "text": "this is safe text" - } + """ + request = {"text": "this is safe text"} response = client.post("/tasks/moderation", json=request) print(response.json())
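The `/tasks/moderation` route exercised in `test_moderation` above is the same endpoint that the reformatted `scripts/moderation.py` drives in bulk: it streams the `creations` collection in batches and posts each task's `text_input` for moderation. A condensed sketch of that pattern; the Mongo connection string, the `eden` database name, and the `app.server` import path are placeholders (the real script's setup sits outside the hunks shown in this diff):

```
import os

import pymongo
from bson.objectid import ObjectId
from fastapi.testclient import TestClient

from app.server import app  # assumed import path (the README runs `uvicorn app.server:app`)

client = TestClient(app)
db = pymongo.MongoClient(os.environ["MONGO_URI"])["eden"]  # placeholder URI and database name


def iterate_collection(collection_name, callback, batch_size=100):
    # Stream documents in fixed-size batches; close the cursor even if the callback raises.
    docs = db[collection_name].find().batch_size(batch_size)
    try:
        for doc in docs:
            callback(doc)
    finally:
        docs.close()


def process_creation(creation):
    if "task" not in creation:
        return
    task = db["tasks"].find_one({"_id": ObjectId(creation["task"])})
    if not task or "text_input" not in task["config"]:
        return
    response = client.post("/tasks/moderation", json={"text": task["config"]["text_input"]})
    print(response.json())


iterate_collection("creations", process_creation)
```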
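The kojii tests reformatted earlier in this diff all follow the same shape: build a randomized request with `random.choice`/`random.uniform`, POST it through the `TestClient`, and assert on the status code. One possible variation, not part of this change, is to seed and log `random` per test so a failing randomized payload can be reproduced. A sketch under that assumption, reusing the `/kojii/untitledxyz` fields shown above and the same assumed `app.server` import:

```
import random

import pytest
from fastapi.testclient import TestClient

from app.server import app  # assumed import path

client = TestClient(app)


@pytest.fixture(autouse=True)
def seeded_random():
    # Print the seed so a failing randomized payload can be replayed with the same values.
    seed = random.randrange(2**32)
    print(f"random seed: {seed}")
    random.seed(seed)
    yield


def test_kojii_untitledxyz_seeded():
    request = {
        "type": random.choice(["column", "context"]),
        "human_machine_nature": random.uniform(0, 1),
    }
    response = client.post("/kojii/untitledxyz", json=request)
    print(response.json())
    assert response.status_code == 200
```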