From cba185c30ed0568e69807e2f766071e6301be530 Mon Sep 17 00:00:00 2001
From: "Alexander G. Morano"
Date: Wed, 28 Aug 2024 21:05:47 -0700
Subject: [PATCH] new STRINGER op node; QUEUE node now supports sub-directories

---
 README.md             | 16 ++++++++++
 __init__.py           |  4 +++
 core/calc.py          | 74 ++++++++++++++++++++++++++++++++++++++++++-
 core/compose.py       |  2 +-
 core/utility.py       | 49 ++++++++++++++++------------
 node_list.json        |  1 +
 pyproject.toml        |  2 +-
 web/nodes/array.js    |  2 +-
 web/nodes/stringer.js | 21 ++++++++++++
 9 files changed, 147 insertions(+), 24 deletions(-)
 create mode 100644 web/nodes/stringer.js

diff --git a/README.md b/README.md
index d009213..bf681b2 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,22 @@ If those nodes have descriptions written in HTML or Markdown, they will be conve
 
 ## UPDATES
 
+**2024/08/28**:
+
+* New `STRINGER NODE` for string operations: Split, Join, Find, Replace and Slice.
+
+![STRINGER NODE](https://github.com/user-attachments/assets/557bdef6-c0d3-4d01-a3dd-46f4a51952fa)
+
+* `QUEUE NODE` now supports recursing into sub-directories. To filter PNG, JPG and GIF files from the c:/images folder and its sub-folders:
+
+  `c:/images;.png,.jpg,.gif`
+
+  You can add as many extensions as you need, but keep in mind that filtering for a type does not mean it can be loaded -- the Queue node will still return a plain string for any file it cannot find or load.
+
+![QUEUE NODE](https://github.com/user-attachments/assets/9686b900-24a2-46ab-88ba-9e3c929b439c)
+
+* Supports ComfyUI 0.1.3+, frontend 1.2.39+
+
 **2024/08/25**:
 * Added conversion coercion for Mixlab Layer types
 ![Mixlab supports](https://github.com/user-attachments/assets/05a53b98-b620-4743-b7b5-26da4140d443)
diff --git a/__init__.py b/__init__.py
index eb1e6b1..792dc17 100644
--- a/__init__.py
+++ b/__init__.py
@@ -196,6 +196,7 @@ class Lexicon(metaclass=LexiconMeta):
     FALSE = 'πŸ‡«', "False"
     FILEN = 'πŸ’Ύ', "File Name"
     FILTER = 'πŸ”Ž', "Filter"
+    FIND = 'FIND', "Find"
     FIXED = 'FIXED', "Fixed"
     FLIP = 'πŸ™ƒ', "Flip Input A and Input B with each other"
     FLOAT = 'πŸ›Ÿ', "Float"
@@ -285,6 +286,8 @@ class Lexicon(metaclass=LexiconMeta):
     RATE = 'RATE', "Rate"
     RECORD = '⏺', "Arm record capture from selected device"
     REGION = 'REGION', "Region"
+    RECURSE = 'RECURSE', "Search within sub-directories"
+    REPLACE = 'REPLACE', "String to use as replacement"
     RESET = 'RESET', "Reset"
     RGB = '🌈', "RGB (no alpha) Color"
     RGB_A = '🌈A', "RGB (no alpha) Color"
@@ -399,6 +402,7 @@ def __call__(cls, *arg, **kw) -> Any:
 # =============================================================================
 
 class JOVBaseNode:
+    NOT_IDEMPOTENT = True
     RETURN_TYPES = ()
     FUNCTION = "run"
     # instance map for caching
diff --git a/core/calc.py b/core/calc.py
index 3b5e373..cef4c83 100644
--- a/core/calc.py
+++ b/core/calc.py
@@ -23,7 +23,7 @@
     JOVBaseNode, ComfyAPIMessage, TimedOutException, JOV_TYPE_ANY, \
     JOV_TYPE_FULL, JOV_TYPE_NUMBER, JOV_TYPE_VECTOR
 
-from Jovimetrix.sup.util import parse_param, parse_value, vector_swap, \
+from Jovimetrix.sup.util import parse_dynamic, parse_param, parse_value, vector_swap, \
     zip_longest_fill, EnumConvertType, EnumSwizzle
 
 from Jovimetrix.sup.anim import ease_op, wave_op, EnumWave, EnumEase
@@ -98,6 +98,13 @@ class EnumComparison(Enum):
     IN = 82
     NOT_IN = 83
 
+class EnumConvertString(Enum):
+    SPLIT = 10
+    JOIN = 30
+    FIND = 40
+    REPLACE = 50
+    SLICE = 70  # start, end, step -- defaults to 0, -1, 1
+
 class EnumNumberType(Enum):
     INT = 0
     FLOAT = 10
@@ -675,6 +682,71 @@ def run(self, **kw) -> Tuple[Any, Any]:
             pbar.update_absolute(idx)
         return [values]
 
+class StringerNode(JOVBaseNode):
+    NAME = "STRINGER (JOV) πŸͺ€"
+    CATEGORY = f"JOVIMETRIX πŸ”ΊπŸŸ©πŸ”΅/{JOV_CATEGORY}"
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = (Lexicon.STRING,)
+    SORT = 44
+    DESCRIPTION = """
+Manipulate strings: split, join, find, replace and slice
+"""
+
+    @classmethod
+    def INPUT_TYPES(cls) -> dict:
+        d = super().INPUT_TYPES()
+        d = deep_merge(d, {
+            "optional": {
+                # split, join, find, replace, slice
+                Lexicon.FUNC: (EnumConvertString._member_names_, {"default": EnumConvertString.SPLIT.name,
+                                                                  "tooltips":"Operation to perform on the input string"}),
+                Lexicon.KEY: ("STRING", {"default":"", "dynamicPrompt":False, "tooltips":"Delimiter (SPLIT/JOIN) or search string (FIND/REPLACE)."}),
+                Lexicon.REPLACE: ("STRING", {"default":"", "dynamicPrompt":False}),
+                Lexicon.RANGE: ("VEC3INT", {"default":(0, -1, 1), "tooltips":"Start, End and Step. Values will clip to the length of each string."}),
+            }
+        })
+        return Lexicon._parse(d, cls)
+
+    def run(self, **kw) -> Tuple[Any, ...]:
+        # turn any/all dynamic inputs into lists
+        data_list = parse_dynamic(kw, Lexicon.UNKNOWN, EnumConvertType.ANY, None)
+        if data_list is None:
+            logger.warning("no data for list")
+            return ([],)
+        # flat list of ALL the dynamic inputs...
+        data_list = [item for sublist in data_list for item in sublist]
+        # single operation mode -- like array node
+        op = parse_param(kw, Lexicon.FUNC, EnumConvertType.STRING, EnumConvertString.SPLIT.name)[0]
+        key = parse_param(kw, Lexicon.KEY, EnumConvertType.STRING, "")[0]
+        replace = parse_param(kw, Lexicon.REPLACE, EnumConvertType.STRING, "")[0]
+        stenst = parse_param(kw, Lexicon.RANGE, EnumConvertType.VEC3INT, [(0, -1, 1)])[0]
+        results = []
+        match EnumConvertString[op]:
+            case EnumConvertString.SPLIT:
+                results = data_list
+                if key != "":
+                    results = [r.split(key) for r in data_list]
+            case EnumConvertString.JOIN:
+                results = [key.join(data_list)]
+            case EnumConvertString.FIND:
+                results = [r for r in data_list if r.find(key) > -1]
+            case EnumConvertString.REPLACE:
+                results = data_list
+                if key != "":
+                    results = [r.replace(key, replace) for r in data_list]
+            case EnumConvertString.SLICE:
+                start, end, step = stenst
+                for x in data_list:
+                    s = len(x) if start < 0 else min(max(0, start), len(x))
+                    e = len(x) if end < 0 else min(max(0, end), len(x))
+                    if step != 0:
+                        results.append(x[s:e:step])
+                    else:
+                        results.append(x)
+        if len(results) == 0:
+            results = [""]
+        return (results,) if len(results) > 1 else (results[0],)
+
 class SwizzleNode(JOVBaseNode):
     NAME = "SWIZZLE (JOV) 😡"
     CATEGORY = f"JOVIMETRIX πŸ”ΊπŸŸ©πŸ”΅/{JOV_CATEGORY}"
diff --git a/core/compose.py b/core/compose.py
index 90c92b7..2a554f3 100644
--- a/core/compose.py
+++ b/core/compose.py
@@ -352,7 +352,7 @@ def INPUT_TYPES(cls) -> dict:
                     {"default": EnumColorMatchMap.USER_MAP.name}),
                 Lexicon.COLORMAP: (EnumColorMap._member_names_,
                     {"default": EnumColorMap.HSV.name}),
-                Lexicon.VALUE: ("INT", {"default": 255, "mij": 0, "maj": 255}),
+                Lexicon.VALUE: ("INT", {"default": 255, "mij": 0, "maj": 255, "tooltips":"The number of colors to use from the LUT during the remap. Will quantize the LUT range."}),
                 Lexicon.FLIP: ("BOOLEAN", {"default": False}),
                 Lexicon.INVERT: ("BOOLEAN", {"default": False,
                     "tooltips": "Invert the color match output"}),
diff --git a/core/utility.py b/core/utility.py
index 7a4c5c0..b75894c 100644
--- a/core/utility.py
+++ b/core/utility.py
@@ -530,10 +530,11 @@ def INPUT_TYPES(cls) -> dict:
         d = deep_merge(d, {
             "optional": {
                 Lexicon.QUEUE: ("STRING", {"multiline": True, "default": "./res/img/test-a.png"}),
-                Lexicon.VALUE: ("INT", {"mij": 0, "default": 0, "tooltips": "the current index for the current queue item"}),
+                Lexicon.VALUE: ("INT", {"mij": 0, "default": 0, "tooltips": "The current index of the queue item"}),
                 Lexicon.WAIT: ("BOOLEAN", {"default": False, "tooltips":"Hold the item at the current queue index"}),
-                Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"reset the queue back to index 1"}),
-                Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"load all items, if they are loadable items, i.e. batch load images from the Queue's list"}),
+                Lexicon.RESET: ("BOOLEAN", {"default": False, "tooltips":"Reset the queue back to index 1"}),
+                Lexicon.BATCH: ("BOOLEAN", {"default": False, "tooltips":"Load every loadable item at once, i.e. batch load all images in the Queue's list"}),
+                Lexicon.RECURSE: ("BOOLEAN", {"default": False}),
             },
             "outputs": {
                 0: (Lexicon.ANY_OUT, {"tooltips":"Current item selected from the Queue list"}),
@@ -546,10 +547,13 @@ def INPUT_TYPES(cls) -> dict:
         return Lexicon._parse(d, cls)
 
     @classmethod
-    def IS_CHANGED(cls) -> float:
+    def IS_CHANGED(cls, *arg, **kw) -> float:
        return float("nan")
 
     def __init__(self) -> None:
+        self.__formats = image_formats()
+        # the queue can batch-load the supported video formats as well as images
+        self.__formats.extend(self.VIDEO_FORMATS)
         self.__index = 0
         self.__q = None
         self.__index_last = None
@@ -557,24 +561,27 @@ def __init__(self) -> None:
         self.__previous = None
         self.__last_q_value = {}
 
-    def __parse(self, data) -> list:
+    def __parse(self, data: Any, recurse: bool=False) -> list:
         entries = []
         for line in data.strip().split('\n'):
-            parts = [part.strip() for part in line.split(',')]
-            count = 1
-            if len(parts) > 2:
-                try: count = int(parts[-1])
-                except: pass
+            if len(line) == 0:
+                continue
 
+            # <path>;png,gif,jpg
+            parts = [part.strip() for part in line.split(';')]
             data = [parts[0]]
             path = Path(parts[0])
             path2 = Path(ROOT / parts[0])
-            if path.is_dir() or path2.is_dir():
-                philter = parts[1].split(';') if len(parts) > 1 and isinstance(parts[1], str) else image_formats()
-                philter.extend(self.VIDEO_FORMATS)
-                path = path if path.is_dir() else path2
-                file_names = [file.name for file in path.iterdir() if file.is_file()]
-                new_data = [str(path / fname) for fname in file_names if any(fname.endswith(pat) for pat in philter)]
+            if path.exists() or path2.exists():
+                philter = parts[1].split(',') if len(parts) > 1 and isinstance(parts[1], str) else self.__formats
+                path = path if path.exists() else path2
+
+                if recurse:
+                    file_names = [str(file.resolve()) for file in path.rglob('*') if file.is_file()]
+                else:
+                    file_names = [str(file.resolve()) for file in path.iterdir() if file.is_file()]
+                new_data = [fname for fname in file_names if any(fname.endswith(pat) for pat in philter)]
+
                 if len(new_data):
                     data = new_data
             elif path.is_file() or path2.is_file():
@@ -588,12 +595,12 @@ def __parse(self, data) -> list:
             elif len(results := glob.glob(str(path2))) > 0:
                 data = [x.replace('\\', '/') for x in results]
 
-            if len(data) and count > 0:
+            if len(data):
                 ret = []
                 for x in data:
                     try: ret.append(float(x))
                     except: ret.append(x)
-                entries.extend(ret * count)
+                entries.extend(ret)
         return entries
 
     def run(self, ident, **kw) -> None:
@@ -606,7 +613,7 @@ def process(q_data: Any) -> Tuple[torch.Tensor, torch.Tensor] | str | dict:
                 if not os.path.isfile(q_data):
                     return q_data
                 _, ext = os.path.splitext(q_data)
-                if ext in image_formats():
+                if ext in self.__formats:
                     data = image_load(q_data)[0]
                     self.__last_q_value[q_data] = data
                 elif ext == '.json':
@@ -626,8 +633,10 @@ def process(q_data: Any) -> Tuple[torch.Tensor, torch.Tensor] | str | dict:
             # process Q into ...
             # check if folder first, file, then string.
            # entry is: data, ,
+            # read the RECURSE toggle first; it changes how the queue list is parsed
+            recurse = parse_param(kw, Lexicon.RECURSE, EnumConvertType.BOOLEAN, False)[0]
             q = parse_param(kw, Lexicon.QUEUE, EnumConvertType.STRING, "")[0]
-            self.__q = self.__parse(q)
+            self.__q = self.__parse(q, recurse)
             self.__len = len(self.__q)
             self.__index_last = 0
             self.__previous = self.__q[0] if len(self.__q) else None
diff --git a/node_list.json b/node_list.json
index b4b214d..44a6f49 100644
--- a/node_list.json
+++ b/node_list.json
@@ -51,6 +51,7 @@
     "STEREOSCOPIC (JOV) \ud83d\udd76\ufe0f": "Simulates depth perception in images by generating stereoscopic views",
     "STREAM READER (JOV) \ud83d\udcfa": "Capture frames from various sources such as URLs, cameras, monitors, windows, or Spout streams",
     "STREAM WRITER (JOV) \ud83c\udf9e\ufe0f": "Sends frames to a specified route, typically for live streaming or recording purposes",
+    "STRINGER (JOV) \ud83e\ude80": "Manipulate strings: split, join, find, replace and slice",
     "SWIZZLE (JOV) \ud83d\ude35": "Swap components between two vectors based on specified swizzle patterns and values",
     "TEXT GEN (JOV) \ud83d\udcdd": "Generates images containing text based on parameters such as font, size, alignment, color, and position",
     "THRESHOLD (JOV) \ud83d\udcc9": "Define a range and apply it to an image for segmentation and feature extraction",
diff --git a/pyproject.toml b/pyproject.toml
index 06033ec..44608db 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "jovimetrix"
 description = "Integrates Webcam, MIDI, Spout and GLSL shader support. Animation via tick. Parameter manipulation with wave generator. Math operations with Unary and Binary support. Value conversion for all major types (int, string, list, dict, Image, Mask). Shape mask generation, image stacking and channel ops, batch splitting, merging and randomizing, load images and video from anywhere, dynamic bus routing with a single node, export support for GIPHY, save output anywhere! flatten, crop, transform; check colorblindness, make stereogram or stereoscopic images, or liner interpolate values and more."
-version = "1.2.30"
+version = "1.2.31"
 license = { file = "LICENSE" }
 dependencies = [
     "aenum>=3.1.15,<4",
diff --git a/web/nodes/array.js b/web/nodes/array.js
index 3119cb1..cdff67f 100644
--- a/web/nodes/array.js
+++ b/web/nodes/array.js
@@ -1,5 +1,5 @@
 /**
- * File: batcher.js
+ * File: array.js
  * Project: Jovimetrix
  *
  */
diff --git a/web/nodes/stringer.js b/web/nodes/stringer.js
new file mode 100644
index 0000000..e47efaa
--- /dev/null
+++ b/web/nodes/stringer.js
@@ -0,0 +1,21 @@
+/**
+ * File: stringer.js
+ * Project: Jovimetrix
+ *
+ */
+
+import { app } from "../../../scripts/app.js"
+import { nodeAddDynamic } from '../util/util_node.js'
+
+const _id = "STRINGER (JOV) πŸͺ€"
+const _prefix = '❔'
+
+app.registerExtension({
+    name: 'jovimetrix.node.' + _id,
+    async beforeRegisterNodeDef(nodeType, nodeData) {
+        if (nodeData.name !== _id) {
+            return;
+        }
+        nodeType = nodeAddDynamic(nodeType, _prefix);
+    }
+})
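
Appendix (not part of the patch): a minimal, standalone sketch of the `path;ext,ext` queue-line rule that the reworked __parse() implements, for readers who want to try the filter syntax outside ComfyUI. The helper name parse_queue_line and the DEFAULT_EXTS list are illustrative assumptions, not names from the repository.

    from pathlib import Path

    DEFAULT_EXTS = ('.png', '.jpg', '.gif')  # stand-in for image_formats() + VIDEO_FORMATS

    def parse_queue_line(line: str, recurse: bool = False) -> list[str]:
        # "c:/images;.png,.jpg,.gif" -> directory part plus an optional extension filter
        parts = [p.strip() for p in line.split(';')]
        path = Path(parts[0])
        exts = tuple(parts[1].split(',')) if len(parts) > 1 else DEFAULT_EXTS
        if path.is_dir():
            # recurse switches the walk from the top level only to all sub-directories
            walker = path.rglob('*') if recurse else path.iterdir()
            return [str(f) for f in walker if f.is_file() and f.name.endswith(exts)]
        # anything that is not a directory falls through as a single plain entry
        return [parts[0]]

    # e.g. parse_queue_line('c:/images;.png,.jpg,.gif', recurse=True)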