From a2d3d5dbe30e1bc1493cc327b07fb1e2a3cbc5c5 Mon Sep 17 00:00:00 2001
From: "Alexander G. Morano"
Date: Sat, 22 Jun 2024 23:34:08 -0700
Subject: [PATCH] missing: adjust, lerp, op binary, value, wave graph, wave
 gen, text gen, swizzle, stack

---
 core/calc.py                 |  5 +---
 core/compose.py              | 57 ++++++++++++++----------------
 core/create.py               | 12 ++++----
 core/create_glsl.py          |  2 +-
 core/device_midi.py          |  3 --
 core/device_stream.py        |  2 +-
 core/utility.py              |  3 +-
 sup/image.py                 | 25 +++++++++++-----
 sup/util.py                  |  3 +-
 web/nodes/shape_generator.js | 36 -----------------------
 web/util/util_widget.js      |  1 +
 11 files changed, 52 insertions(+), 97 deletions(-)
 delete mode 100644 web/nodes/shape_generator.js

diff --git a/core/calc.py b/core/calc.py
index d2e95dd..95777d3 100644
--- a/core/calc.py
+++ b/core/calc.py
@@ -245,7 +245,7 @@ def run(self, **kw) -> Tuple[bool]:
                     else:
                         val = [0] * len(val)
                 case EnumUnaryOperation.MAXIMUM:
-                    val = [max(v)]
+                    val = [max(val)]
                 case EnumUnaryOperation.MINIMUM:
                     val = [min(val)]
                 case _:
@@ -598,8 +598,6 @@ def run(self, **kw) -> Tuple[Any, Any]:
         values = []
         params = list(zip_longest_fill(A, B, a_xyzw, b_xyzw, alpha, op, typ))
         pbar = ProgressBar(len(params))
-        print(A, a_xyzw)
-        print(B, b_xyzw)
         for idx, (A, B, a_xyzw, b_xyzw, alpha, op, typ) in enumerate(params):
             # make sure we only interpolate between the longest "stride" we can
             size = min(3, max(0 if not isinstance(A, (list,)) else len(A), 0 if not isinstance(B, (list,)) else len(B)))
@@ -610,7 +608,6 @@ def run(self, **kw) -> Tuple[Any, Any]:
             # val_a = parse_value(A, EnumConvertType.VEC4, A if A is not None else a_xyzw)
             # val_b = parse_value(B, EnumConvertType.VEC4, B if B is not None else b_xyzw)
             # alpha = parse_value(alpha, EnumConvertType.VEC4, alpha)
-            print(val_a, val_b, alpha)
             typ = EnumConvertType[typ]
             size = max(1, int(typ.value / 10))
             if size > 1:
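The calc.py hunks sit inside run() methods that batch their widget inputs through zip_longest_fill() before looping, which is why dropping the stray print() calls is safe: the loop variables already carry everything the prints were inspecting. For reference, a minimal sketch of that helper's presumed contract — the real implementation lives in Jovimetrix/sup/util.py, and the repeat-the-last-value fill rule here is an assumption, not a quote of it:

def zip_longest_fill(*iterables):
    # Sketch only: shorter parameter lists repeat their final value, so every
    # iteration of run() receives a complete (A, B, a_xyzw, ...) tuple.
    pools = [list(it) for it in iterables]
    fills = [pool[-1] if pool else None for pool in pools]  # assumed fill rule
    for idx in range(max(len(pool) for pool in pools)):
        yield tuple(pool[idx] if idx < len(pool) else fill
                    for pool, fill in zip(pools, fills))

list(zip_longest_fill([1, 2, 3], ['a']))  # [(1, 'a'), (2, 'a'), (3, 'a')]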
diff --git a/core/compose.py b/core/compose.py
index 3cac1eb..1e56c82 100644
--- a/core/compose.py
+++ b/core/compose.py
@@ -19,7 +19,7 @@
 from Jovimetrix.sup.util import parse_dynamic, parse_param, \
     zip_longest_fill, EnumConvertType
 from Jovimetrix.sup.image import \
-    channel_solid, channel_swap, color_match_histogram, color_match_lut, \
+    channel_merge, channel_solid, channel_swap, color_match_histogram, color_match_lut, image_filter, image_quantize, image_scalefit, \
     color_match_reinhard, cv2tensor_full, image_color_blind, image_contrast,\
     image_crop, image_crop_center, image_crop_polygonal, image_equalize, \
     image_gamma, image_grayscale, image_hsv, image_levels, image_convert, \
@@ -28,7 +28,6 @@
     image_split, morph_edge_detect, morph_emboss, pixel_eval, tensor2cv, \
     color_theory, remap_fisheye, remap_perspective, remap_polar, cv2tensor, \
     remap_sphere, image_invert, image_stack, image_mirror, image_blend, \
-    image_filter, image_quantize, image_scalefit,\
     EnumImageType, EnumColorTheory, EnumProjection, EnumScaleMode, \
     EnumEdge, EnumMirrorMode, EnumOrientation, EnumPixelSwizzle, EnumBlendType, \
     EnumCBDeficiency, EnumCBSimulator, EnumColorMap, EnumAdjustOP, \
@@ -115,7 +114,6 @@ def run(self, **kw) -> Tuple[torch.Tensor, ...]:
             if cc == 4:
                 alpha = pA[:,:,3]
 
-            print(op, radius, amt, lohi, lmh, hsv, contrast, gamma, matte, invert)
             match EnumAdjustOP[op]:
                 case EnumAdjustOP.INVERT:
                     img_new = image_invert(pA, amt)
@@ -204,7 +202,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, ...]:
                 pA[:,:,3] = alpha
             images.append(cv2tensor_full(pA, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class BlendNode(JOVBaseNode):
     NAME = "BLEND (JOV) βš—οΈ"
@@ -302,7 +300,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
             img = cv2tensor_full(img, matte)
             images.append(img)
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class ColorBlindNode(JOVBaseNode):
     NAME = "COLOR BLIND (JOV) πŸ‘β€πŸ—¨"
@@ -343,7 +341,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
             pA = image_color_blind(pA, deficiency, simulator, severity)
             images.append(cv2tensor_full(pA))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class ColorMatchNode(JOVBaseNode):
     NAME = "COLOR MATCH (JOV) πŸ’ž"
@@ -427,7 +425,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
                 pA = image_mask_add(pA, mask)
             images.append(cv2tensor_full(pA, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class ColorTheoryNode(JOVBaseNode):
     NAME = "COLOR THEORY (JOV) πŸ›ž"
@@ -468,7 +466,7 @@ def run(self, **kw) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
                 img = (image_invert(s, 1) for s in img)
             images.append([cv2tensor(a) for a in img])
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class CropNode(JOVBaseNode):
     NAME = "CROP (JOV) βœ‚οΈ"
@@ -524,7 +522,7 @@ def run(self, **kw) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
                 pA = image_crop_center(pA, width, height)
             images.append(cv2tensor_full(pA, color))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class FilterMaskNode(JOVBaseNode):
     NAME = "FILTER MASK (JOV) 🀿"
@@ -569,7 +567,7 @@ def run(self, **kw) -> Tuple[Any, ...]:
             logger.debug(f"{img.shape}, {type(img)}, {matte.shape}")
             images.append([cv2tensor(img), cv2tensor(matte), cv2tensor(mask)])
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class Flatten(JOVBaseNode):
     NAME = "FLATTEN (JOV) ⬇️"
@@ -624,7 +622,7 @@ def run(self, **kw) -> torch.Tensor:
                 current = cv2.add(current, x)
             images.append(cv2tensor_full(current, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class PixelMergeNode(JOVBaseNode):
     NAME = "PIXEL MERGE (JOV) πŸ«‚"
@@ -655,10 +653,10 @@ def INPUT_TYPES(cls) -> dict:
         return Lexicon._parse(d, cls)
 
     def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
-        R = parse_param(kw, Lexicon.R, EnumConvertType.MASK, None)
-        G = parse_param(kw, Lexicon.G, EnumConvertType.MASK, None)
-        B = parse_param(kw, Lexicon.B, EnumConvertType.MASK, None)
-        A = parse_param(kw, Lexicon.A, EnumConvertType.MASK, None)
+        R = parse_param(kw, Lexicon.R, EnumConvertType.IMAGE, None)
+        G = parse_param(kw, Lexicon.G, EnumConvertType.IMAGE, None)
+        B = parse_param(kw, Lexicon.B, EnumConvertType.IMAGE, None)
+        A = parse_param(kw, Lexicon.A, EnumConvertType.IMAGE, None)
         mode = parse_param(kw, Lexicon.MODE, EnumConvertType.STRING, EnumScaleMode.NONE.name)
         wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), MIN_IMAGE_SIZE)
         sample = parse_param(kw, Lexicon.SAMPLE, EnumConvertType.STRING, EnumInterpolation.LANCZOS4.name)
@@ -670,19 +668,8 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
         images = []
         pbar = ProgressBar(len(params))
         for idx, (r, g, b, a, mode, wihi, sample, matte) in enumerate(params):
-            mw, mh = wihi
-            chan = []
-            for x in (b, g, r, a):
-                if x is None:
-                    chan.append(None)
-                    continue
-                x = tensor2cv(x)
-                chan.append(x)
-                h, w = x.shape[:2]
-                mw = max(mw, w)
-                mh = max(mh, h)
-            img = [np.zeros((mh, mw, 1)) if x is None else x for x in chan]
-            img = np.concatenate(img, 2)
+            img = [None if x is None else tensor2cv(x) for x in (r,g,b,a)]
+            img = channel_merge(img)
             mode = EnumScaleMode[mode]
             if mode != EnumScaleMode.NONE:
                 w, h = wihi
@@ -690,7 +677,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
                 img = image_scalefit(img, w, h, mode, sample)
             images.append(cv2tensor_full(img, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class PixelSplitNode(JOVBaseNode):
     NAME = "PIXEL SPLIT (JOV) πŸ’”"
@@ -717,12 +704,12 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
         pA = parse_param(kw, Lexicon.PIXEL, EnumConvertType.IMAGE, None)
         pbar = ProgressBar(len(pA))
         for idx, pA in enumerate(pA):
-            pA = tensor2cv(pA) if pA is not None else channel_solid(chan=EnumImageType.BGRA)
+            pA = channel_solid(chan=EnumImageType.BGRA) if pA is None else tensor2cv(pA)
             pA = image_mask_add(pA)
-            pA = [cv2tensor(x) for x in image_split(pA)]
+            pA = [cv2tensor(x, True) for x in image_split(pA)]
             images.append(pA)
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class PixelSwapNode(JOVBaseNode):
     NAME = "PIXEL SWAP (JOV) πŸ”ƒ"
@@ -805,7 +792,7 @@ def swapper(swap_out:EnumPixelSwizzle, swap_in:EnumPixelSwizzle) -> np.ndarray[A
             out[:,:,3] = swapper(EnumPixelSwizzle.ALPHA_A, swap_a)[:,:,3]
             images.append(cv2tensor_full(out))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class StackNode(JOVBaseNode):
     NAME = "STACK (JOV) βž•"
@@ -899,7 +886,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
                 pA = image_invert(pA, 1)
             images.append(cv2tensor_full(pA))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class TransformNode(JOVBaseNode):
     NAME = "TRANSFORM (JOV) 🏝️"
@@ -1001,7 +988,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
                 pA = image_scalefit(pA, w, h, mode, sample)
             images.append(cv2tensor_full(pA, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 '''
 class HistogramNode(JOVImageSimple):
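Every return-statement hunk in compose.py is the same mechanical cleanup: list(zip(*images)) becomes zip(*images), since zip already yields the transposed tuples lazily and the list comprehension consumes them directly. A standalone sketch of the pattern these run() methods share (the tensor shapes are illustrative, not taken from the nodes):

import torch

# each run() loop appends one (full, rgb, mask) tuple per frame...
images = [
    (torch.zeros(1, 8, 8, 4), torch.zeros(1, 8, 8, 3), torch.zeros(1, 8, 8, 1)),
    (torch.ones(1, 8, 8, 4), torch.ones(1, 8, 8, 3), torch.ones(1, 8, 8, 1)),
]

# ...and zip(*images) regroups them into one tuple per output slot, which
# torch.cat then stacks along the batch dimension.
batched = [torch.cat(i, dim=0) for i in zip(*images)]
print([b.shape for b in batched])  # [2, 8, 8, 4], [2, 8, 8, 3], [2, 8, 8, 1]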
diff --git a/core/create.py b/core/create.py
index 0680d7a..3010836 100644
--- a/core/create.py
+++ b/core/create.py
@@ -86,7 +86,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
                 pA = image_scalefit(pA, width, height, mode, sample)
             images.append(cv2tensor_full(pA, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class ShapeNode(JOVBaseNode):
     NAME = "SHAPE GEN (JOV) ✨"
@@ -128,6 +128,7 @@ def INPUT_TYPES(cls) -> dict:
 
     def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
         shape = parse_param(kw, Lexicon.SHAPE, EnumConvertType.STRING, EnumShapes.CIRCLE.name)
+        print(kw[Lexicon.SIDES])
         sides = parse_param(kw, Lexicon.SIDES, EnumConvertType.INT, 3, 3, 512)
         angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0)
         edge = parse_param(kw, Lexicon.EDGE, EnumConvertType.STRING, EnumEdge.CLIP.name)
@@ -148,6 +149,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
         #color = pixel_eval(color, EnumImageType.BGRA)
         #matte = pixel_eval(matte, EnumImageType.BGRA)
         alpha_m = int(matte[3])
+        print(sides)
         match shape:
             case EnumShapes.SQUARE:
                 pA = shape_quad(width, height, sizeX, sizeX, fill=color[:3], back=matte[:3])
@@ -183,7 +185,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
 
             images.append([cv2tensor(pB), cv2tensor(pA), cv2tensor(mask, True)])
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class StereogramNode(JOVBaseNode):
     NAME = "STEREOGRAM (JOV) πŸ“»"
@@ -230,7 +232,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
             pA = image_stereogram(pA, depth, divisions, noise, gamma, shift)
             images.append(cv2tensor_full(pA))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class StereoscopicNode(JOVBaseNode):
     NAME = "STEREOSCOPIC (JOV) πŸ•ΆοΈ"
@@ -380,7 +382,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
                 img = image_invert(img, 1)
             images.append(cv2tensor_full(img, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class WaveGraphNode(JOVBaseNode):
     NAME = "WAVE GRAPH (JOV) β–Ά Δ±lΔ±Δ±lΔ±"
@@ -430,7 +432,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
             img = graph_sausage(wave[0], bars, width, height, thickness=thick, color_line=rgb_a, color_back=matte)
             images.append(cv2tensor_full(img))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 '''
 class PurzNode(JOVBaseNode):
diff --git a/core/create_glsl.py b/core/create_glsl.py
index 9174e2b..34a4f8a 100644
--- a/core/create_glsl.py
+++ b/core/create_glsl.py
@@ -114,4 +114,4 @@ def run(self, ident, **kw) -> List[torch.Tensor]:
                 self.__last_good = images
             pbar.update_absolute(idx)
 
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
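One detail worth noting in the ShapeNode hunks: the SIDES widget is parsed with trailing arguments 3 and 512, which reads as parse_param(kw, key, type, default, min, max) — the side count gets clamped to a sane polygon range. The actual signature lives in Jovimetrix/sup/util.py; assuming that reading, the clamp behaves like this hypothetical stand-in:

def clamp_sides(value, default=3, lo=3, hi=512):
    # hypothetical stand-in for the INT branch of parse_param
    try:
        value = int(value)
    except (TypeError, ValueError):
        value = default
    return min(hi, max(lo, value))

assert clamp_sides(7) == 7
assert clamp_sides(1) == 3         # a polygon needs at least three sides
assert clamp_sides(10_000) == 512  # capped at the widget maximum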
diff --git a/core/device_midi.py b/core/device_midi.py
index 103d9ad..05ca49c 100644
--- a/core/device_midi.py
+++ b/core/device_midi.py
@@ -121,7 +121,6 @@ def run(self, **kw) -> Tuple[MIDIMessage, bool, int, int, int, int, float]:
             self.__device = device
         normalize = self.__value / 127.
         msg = MIDIMessage(self.__note_on, self.__channel, self.__control, self.__note, self.__value)
-        print(msg)
         return msg, self.__note_on, self.__channel, self.__control, self.__note, self.__value, normalize,
 
 class MIDIFilterEZNode(JOVBaseNode):
@@ -249,7 +248,6 @@ def __filter(self, data:int, value:str) -> bool:
 
     def run(self, **kw) -> Tuple[bool]:
         message: MIDIMessage = parse_param(kw, Lexicon.MIDI, EnumConvertType.ANY, None)
-        print(message)
         note_on = parse_param(kw, Lexicon.ON, EnumConvertType.STRING, MIDINoteOnFilter.IGNORE.name)
         chan = parse_param(kw, Lexicon.CHANNEL, EnumConvertType.STRING, "")
         ctrl = parse_param(kw, Lexicon.CONTROL, EnumConvertType.STRING, "")
@@ -260,7 +258,6 @@ def run(self, **kw) -> Tuple[bool]:
         results = []
         pbar = ProgressBar(len(params))
         for idx, (message, note_on, chan, ctrl, note, value, normal) in enumerate(params):
-            print(message)
             note_on = MIDINoteOnFilter[note_on]
             if note_on != MIDINoteOnFilter.IGNORE:
                 if note_on == "TRUE" and message.note_on != True:
diff --git a/core/device_stream.py b/core/device_stream.py
index 9562782..4d0ca21 100644
--- a/core/device_stream.py
+++ b/core/device_stream.py
@@ -256,7 +256,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
 
         if len(images) == 0:
             images.append(self.__empty)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 class StreamWriterNode(JOVBaseNode):
     NAME = "STREAM WRITER (JOV) 🎞️"
diff --git a/core/utility.py b/core/utility.py
index 537e967..ce58e27 100644
--- a/core/utility.py
+++ b/core/utility.py
@@ -399,7 +399,6 @@ def run(self, ident, **kw) -> Tuple[torch.Tensor]:
             self.__history = []
         longest_edge = 0
         dynamic = parse_dynamic(kw, Lexicon.UNKNOWN, EnumConvertType.FLOAT, 0)
-        print(dynamic)
         # each of the plugs
         self.__ax.clear()
         for idx, val in enumerate(dynamic):
@@ -457,7 +456,7 @@ def run(self, **kw) -> None:
                 pA = channel_solid(w, h)
             images.append(cv2tensor_full(pA, matte))
             pbar.update_absolute(idx)
-        return [torch.cat(i, dim=0) for i in list(zip(*images))]
+        return [torch.cat(i, dim=0) for i in zip(*images)]
 
 '''
 class QueueNode(JOVBaseNode):
diff --git a/sup/image.py b/sup/image.py
index c0ee48b..a7ae06c 100644
--- a/sup/image.py
+++ b/sup/image.py
@@ -565,14 +565,23 @@ def channel_solid(width:int=MIN_IMAGE_SIZE, height:int=MIN_IMAGE_SIZE, color:TYP
     return np.full((height, width, 4), color, dtype=np.uint8)
 
 def channel_merge(channel:List[TYPE_IMAGE]) -> TYPE_IMAGE:
-    ch = [c.shape[:2] if c is not None else (0, 0) for c in channel[:3]]
-    w = max([c[1] for c in ch])
-    h = max([c[0] for c in ch])
-    ch = [np.zeros((h, w), dtype=np.uint8) if c is None else cv2.resize(c, (h, w)) for c in channel[:3]]
-    if len(channel) == 4:
-        a = channel[3] if len(channel) == 4 else np.full((h, w), 255, dtype=np.uint8)
-        ch.append(a)
-    return cv2.merge(ch)
+    ch_sizes = [c.shape[:2] if c is not None else (0, 0) for c in channel]
+    ch_sizes.append([MIN_IMAGE_SIZE, MIN_IMAGE_SIZE])
+    max_width = max([c[1] for c in ch_sizes])
+    max_height = max([c[0] for c in ch_sizes])
+    img = channel_solid(max_width, max_height, chan=EnumImageType.BGRA)
+    for i, ch in enumerate(channel):
+        if ch is None:
+            continue
+        if ch.shape[:2] != (max_height, max_width):
+            ch = cv2.resize(ch, (max_width, max_height))
+        if ch.ndim > 2:
+            ch = ch[:,:,0]
+        img[:,:,i] = ch
+
+    if len(channel) == 3:
+        img = img[:, :, :3]
+    return img
 
 def channel_swap(imageA:TYPE_IMAGE, swap_ot:EnumPixelSwizzle,
                  imageB:TYPE_IMAGE, swap_in:EnumPixelSwizzle) -> TYPE_IMAGE:
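The rewritten channel_merge sizes its output from the largest incoming plane (with a MIN_IMAGE_SIZE floor), resizes every smaller plane up, leaves None entries as the solid fill, and only trims to three channels when exactly three were passed. Assuming the function as written above, a quick shape check:

import numpy as np
from Jovimetrix.sup.image import channel_merge

# both planes are assumed to exceed MIN_IMAGE_SIZE, so the floor never kicks in
r = np.full((512, 512), 200, dtype=np.uint8)
g = np.full((768, 512), 100, dtype=np.uint8)    # the largest plane sets the size
print(channel_merge([None, g, r, None]).shape)  # (768, 512, 4); r is resized up
print(channel_merge([r, r, r]).shape)           # (512, 512, 3); 3 in, 3 out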
diff --git a/sup/util.py b/sup/util.py
index 4def0fb..db4f4c3 100644
--- a/sup/util.py
+++ b/sup/util.py
@@ -202,8 +202,7 @@ def parse_value(val:Any, typ:EnumConvertType, default: Any,
             cc, h, w = new_val.shape
             if cc > 1:
                 weights = [0.2989, 0.5870, 0.1140]
-                new_val = np.dot(new_val[..., :3], weights)
-                new_val = new_val.reshape(512, 512, 1)
+                new_val = np.dot(new_val[..., :3], weights)[..., None]
 
     if typ == EnumConvertType.COORD2D:
         new_val = {'x': new_val[0], 'y': new_val[1]}
diff --git a/web/nodes/shape_generator.js b/web/nodes/shape_generator.js
deleted file mode 100644
index 50c45ae..0000000
--- a/web/nodes/shape_generator.js
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * File: shape_generator.js
- * Project: Jovimetrix
- *
- */
-
-import { app } from "../../../scripts/app.js"
-import { fitHeight } from '../util/util.js'
-import { widget_hide, widget_show } from '../util/util_widget.js'
-
-const _id = "SHAPE GEN (JOV) ✨"
-
-app.registerExtension({
-    name: 'jovimetrix.node.' + _id,
-    async beforeRegisterNodeDef(nodeType, nodeData, app) {
-        if (nodeData.name !== _id) {
-            return;
-        }
-
-        const onNodeCreated = nodeType.prototype.onNodeCreated
-        nodeType.prototype.onNodeCreated = function () {
-            const me = onNodeCreated?.apply(this)
-            const sides = this.widgets.find(w => w.name === '♾️');
-            const op = this.widgets.find(w => w.name === 'πŸ‡ΈπŸ‡΄');
-            op.callback = () => {
-                widget_hide(this, sides);
-                if (op.value == 'POLYGON') {
-                    widget_show(sides);
-                }
-                fitHeight(this);
-            }
-            setTimeout(() => { op.callback(); }, 10);
-            return me;
-        }
-    }
-})
diff --git a/web/util/util_widget.js b/web/util/util_widget.js
index 4fa7577..557d5c2 100644
--- a/web/util/util_widget.js
+++ b/web/util/util_widget.js
@@ -85,6 +85,7 @@ export function widget_hide(node, widget, suffix = '') {
     }
     widget.computeSize = () => [0, -4];
     widget.type = CONVERTED_TYPE + suffix;
+    widget.serializeValue = () => {
         // Prevent serializing the widget if we have no input linked
         if (!node.inputs) {