Commit a2d3d5d

missing: adjust, lerp, op binary, value, wave graph, wave gen, text gen, swizzle, stack

Amorano committed Jun 23, 2024
1 parent d46bfe1 commit a2d3d5d
Showing 11 changed files with 52 additions and 97 deletions.
5 changes: 1 addition & 4 deletions core/calc.py
@@ -245,7 +245,7 @@ def run(self, **kw) -> Tuple[bool]:
else:
val = [0] * len(val)
case EnumUnaryOperation.MAXIMUM:
- val = [max(v)]
+ val = [max(val)]
case EnumUnaryOperation.MINIMUM:
val = [min(val)]
case _:
@@ -598,8 +598,6 @@ def run(self, **kw) -> Tuple[Any, Any]:
values = []
params = list(zip_longest_fill(A, B, a_xyzw, b_xyzw, alpha, op, typ))
pbar = ProgressBar(len(params))
- print(A, a_xyzw)
- print(B, b_xyzw)
for idx, (A, B, a_xyzw, b_xyzw, alpha, op, typ) in enumerate(params):
# make sure we only interpolate between the longest "stride" we can
size = min(3, max(0 if not isinstance(A, (list,)) else len(A), 0 if not isinstance(B, (list,)) else len(B)))
@@ -610,7 +608,6 @@ def run(self, **kw) -> Tuple[Any, Any]:
# val_a = parse_value(A, EnumConvertType.VEC4, A if A is not None else a_xyzw)
# val_b = parse_value(B, EnumConvertType.VEC4, B if B is not None else b_xyzw)
# alpha = parse_value(alpha, EnumConvertType.VEC4, alpha)
- print(val_a, val_b, alpha)
typ = EnumConvertType[typ]
size = max(1, int(typ.value / 10))
if size > 1:
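
Note on the calc.py fix: the MAXIMUM case reduced max(v), but v was not the parsed value list, so the reduction could raise a NameError or fold stale data. A minimal sketch of the corrected unary reduction, using a hypothetical input list:

    val = [3.0, 7.0, 1.0]
    maximum = [max(val)]  # was max(v); 'v' was not the parsed list
    minimum = [min(val)]
    print(maximum, minimum)  # [7.0] [1.0]
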
57 changes: 22 additions & 35 deletions core/compose.py
@@ -19,7 +19,7 @@
from Jovimetrix.sup.util import parse_dynamic, parse_param, \
zip_longest_fill, EnumConvertType
from Jovimetrix.sup.image import \
- channel_solid, channel_swap, color_match_histogram, color_match_lut, \
+ channel_merge, channel_solid, channel_swap, color_match_histogram, color_match_lut, image_filter, image_quantize, image_scalefit, \
color_match_reinhard, cv2tensor_full, image_color_blind, image_contrast,\
image_crop, image_crop_center, image_crop_polygonal, image_equalize, \
image_gamma, image_grayscale, image_hsv, image_levels, image_convert, \
@@ -28,7 +28,6 @@
image_split, morph_edge_detect, morph_emboss, pixel_eval, tensor2cv, \
color_theory, remap_fisheye, remap_perspective, remap_polar, cv2tensor, \
remap_sphere, image_invert, image_stack, image_mirror, image_blend, \
- image_filter, image_quantize, image_scalefit,\
EnumImageType, EnumColorTheory, EnumProjection, EnumScaleMode, \
EnumEdge, EnumMirrorMode, EnumOrientation, EnumPixelSwizzle, EnumBlendType, \
EnumCBDeficiency, EnumCBSimulator, EnumColorMap, EnumAdjustOP, \
@@ -115,7 +114,6 @@ def run(self, **kw) -> Tuple[torch.Tensor, ...]:
if cc == 4:
alpha = pA[:,:,3]

- print(op, radius, amt, lohi, lmh, hsv, contrast, gamma, matte, invert)
match EnumAdjustOP[op]:
case EnumAdjustOP.INVERT:
img_new = image_invert(pA, amt)
@@ -204,7 +202,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, ...]:
pA[:,:,3] = alpha
images.append(cv2tensor_full(pA, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class BlendNode(JOVBaseNode):
NAME = "BLEND (JOV) ⚗️"
@@ -302,7 +300,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
img = cv2tensor_full(img, matte)
images.append(img)
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class ColorBlindNode(JOVBaseNode):
NAME = "COLOR BLIND (JOV) 👁‍🗨"
@@ -343,7 +341,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pA = image_color_blind(pA, deficiency, simulator, severity)
images.append(cv2tensor_full(pA))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class ColorMatchNode(JOVBaseNode):
NAME = "COLOR MATCH (JOV) 💞"
@@ -427,7 +425,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pA = image_mask_add(pA, mask)
images.append(cv2tensor_full(pA, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class ColorTheoryNode(JOVBaseNode):
NAME = "COLOR THEORY (JOV) 🛞"
@@ -468,7 +466,7 @@ def run(self, **kw) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
img = (image_invert(s, 1) for s in img)
images.append([cv2tensor(a) for a in img])
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class CropNode(JOVBaseNode):
NAME = "CROP (JOV) ✂️"
@@ -524,7 +522,7 @@ def run(self, **kw) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
pA = image_crop_center(pA, width, height)
images.append(cv2tensor_full(pA, color))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class FilterMaskNode(JOVBaseNode):
NAME = "FILTER MASK (JOV) 🤿"
@@ -569,7 +567,7 @@ def run(self, **kw) -> Tuple[Any, ...]:
logger.debug(f"{img.shape}, {type(img)}, {matte.shape}")
images.append([cv2tensor(img), cv2tensor(matte), cv2tensor(mask)])
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class Flatten(JOVBaseNode):
NAME = "FLATTEN (JOV) ⬇️"
@@ -624,7 +622,7 @@ def run(self, **kw) -> torch.Tensor:
current = cv2.add(current, x)
images.append(cv2tensor_full(current, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class PixelMergeNode(JOVBaseNode):
NAME = "PIXEL MERGE (JOV) 🫂"
@@ -655,10 +653,10 @@ def INPUT_TYPES(cls) -> dict:
return Lexicon._parse(d, cls)

def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
- R = parse_param(kw, Lexicon.R, EnumConvertType.MASK, None)
- G = parse_param(kw, Lexicon.G, EnumConvertType.MASK, None)
- B = parse_param(kw, Lexicon.B, EnumConvertType.MASK, None)
- A = parse_param(kw, Lexicon.A, EnumConvertType.MASK, None)
+ R = parse_param(kw, Lexicon.R, EnumConvertType.IMAGE, None)
+ G = parse_param(kw, Lexicon.G, EnumConvertType.IMAGE, None)
+ B = parse_param(kw, Lexicon.B, EnumConvertType.IMAGE, None)
+ A = parse_param(kw, Lexicon.A, EnumConvertType.IMAGE, None)
mode = parse_param(kw, Lexicon.MODE, EnumConvertType.STRING, EnumScaleMode.NONE.name)
wihi = parse_param(kw, Lexicon.WH, EnumConvertType.VEC2INT, (512, 512), MIN_IMAGE_SIZE)
sample = parse_param(kw, Lexicon.SAMPLE, EnumConvertType.STRING, EnumInterpolation.LANCZOS4.name)
@@ -670,27 +668,16 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
images = []
pbar = ProgressBar(len(params))
for idx, (r, g, b, a, mode, wihi, sample, matte) in enumerate(params):
- mw, mh = wihi
- chan = []
- for x in (b, g, r, a):
-     if x is None:
-         chan.append(None)
-         continue
-     x = tensor2cv(x)
-     chan.append(x)
-     h, w = x.shape[:2]
-     mw = max(mw, w)
-     mh = max(mh, h)
- img = [np.zeros((mh, mw, 1)) if x is None else x for x in chan]
- img = np.concatenate(img, 2)
+ img = [None if x is None else tensor2cv(x) for x in (r,g,b,a)]
+ img = channel_merge(img)
mode = EnumScaleMode[mode]
if mode != EnumScaleMode.NONE:
w, h = wihi
sample = EnumInterpolation[sample]
img = image_scalefit(img, w, h, mode, sample)
images.append(cv2tensor_full(img, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class PixelSplitNode(JOVBaseNode):
NAME = "PIXEL SPLIT (JOV) 💔"
@@ -717,12 +704,12 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pA = parse_param(kw, Lexicon.PIXEL, EnumConvertType.IMAGE, None)
pbar = ProgressBar(len(pA))
for idx, pA in enumerate(pA):
- pA = tensor2cv(pA) if pA is not None else channel_solid(chan=EnumImageType.BGRA)
+ pA = channel_solid(chan=EnumImageType.BGRA) if pA is None else tensor2cv(pA)
pA = image_mask_add(pA)
- pA = [cv2tensor(x) for x in image_split(pA)]
+ pA = [cv2tensor(x, True) for x in image_split(pA)]
images.append(pA)
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class PixelSwapNode(JOVBaseNode):
NAME = "PIXEL SWAP (JOV) 🔃"
@@ -805,7 +792,7 @@ def swapper(swap_out:EnumPixelSwizzle, swap_in:EnumPixelSwizzle) -> np.ndarray[A
out[:,:,3] = swapper(EnumPixelSwizzle.ALPHA_A, swap_a)[:,:,3]
images.append(cv2tensor_full(out))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class StackNode(JOVBaseNode):
NAME = "STACK (JOV) ➕"
@@ -899,7 +886,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pA = image_invert(pA, 1)
images.append(cv2tensor_full(pA))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class TransformNode(JOVBaseNode):
NAME = "TRANSFORM (JOV) 🏝️"
@@ -1001,7 +988,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pA = image_scalefit(pA, w, h, mode, sample)
images.append(cv2tensor_full(pA, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

'''
class HistogramNode(JOVImageSimple):
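
Note on the recurring return change: every node builds images as a list of per-frame tuples, so zip(*images) already yields the per-output groups the comprehension consumes, and the extra list(...) wrapper was dead weight. A standalone sketch of the pattern, with dummy tensors standing in for the (full, rgb, mask) tuples the nodes append:

    import torch

    # Two frames, each appended as a (bgra, rgb, mask) tuple by a node's run loop.
    images = [
        (torch.zeros(1, 8, 8, 4), torch.zeros(1, 8, 8, 3), torch.zeros(1, 8, 8, 1)),
        (torch.ones(1, 8, 8, 4), torch.ones(1, 8, 8, 3), torch.ones(1, 8, 8, 1)),
    ]
    # zip(*images) transposes per-frame tuples into per-output groups;
    # torch.cat then batches each group along dim 0.
    out = [torch.cat(i, dim=0) for i in zip(*images)]
    print([tuple(t.shape) for t in out])  # [(2, 8, 8, 4), (2, 8, 8, 3), (2, 8, 8, 1)]
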
12 changes: 7 additions & 5 deletions core/create.py
@@ -86,7 +86,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pA = image_scalefit(pA, width, height, mode, sample)
images.append(cv2tensor_full(pA, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class ShapeNode(JOVBaseNode):
NAME = "SHAPE GEN (JOV) ✨"
@@ -128,6 +128,7 @@ def INPUT_TYPES(cls) -> dict:

def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
shape = parse_param(kw, Lexicon.SHAPE, EnumConvertType.STRING, EnumShapes.CIRCLE.name)
+ print(kw[Lexicon.SIDES])
sides = parse_param(kw, Lexicon.SIDES, EnumConvertType.INT, 3, 3, 512)
angle = parse_param(kw, Lexicon.ANGLE, EnumConvertType.FLOAT, 0)
edge = parse_param(kw, Lexicon.EDGE, EnumConvertType.STRING, EnumEdge.CLIP.name)
@@ -148,6 +149,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
#color = pixel_eval(color, EnumImageType.BGRA)
#matte = pixel_eval(matte, EnumImageType.BGRA)
alpha_m = int(matte[3])
+ print(sides)
match shape:
case EnumShapes.SQUARE:
pA = shape_quad(width, height, sizeX, sizeX, fill=color[:3], back=matte[:3])
@@ -183,7 +185,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:

images.append([cv2tensor(pB), cv2tensor(pA), cv2tensor(mask, True)])
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class StereogramNode(JOVBaseNode):
NAME = "STEREOGRAM (JOV) 📻"
@@ -230,7 +232,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
pA = image_stereogram(pA, depth, divisions, noise, gamma, shift)
images.append(cv2tensor_full(pA))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class StereoscopicNode(JOVBaseNode):
NAME = "STEREOSCOPIC (JOV) 🕶️"
@@ -380,7 +382,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
img = image_invert(img, 1)
images.append(cv2tensor_full(img, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class WaveGraphNode(JOVBaseNode):
NAME = "WAVE GRAPH (JOV) ▶ ılıılı"
@@ -430,7 +432,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:
img = graph_sausage(wave[0], bars, width, height, thickness=thick, color_line=rgb_a, color_back=matte)
images.append(cv2tensor_full(img))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

'''
class PurzNode(JOVBaseNode):
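
Note on the ShapeNode hunks: the added prints trace the SIDES parameter through parse_param(kw, Lexicon.SIDES, EnumConvertType.INT, 3, 3, 512), whose trailing arguments read as default, minimum, maximum. A hypothetical stand-in for that call, assuming those clamp semantics (the real helper in Jovimetrix.sup.util handles many more conversion types):

    def parse_int(kw: dict, key: str, default: int, lo: int, hi: int) -> int:
        # Assumed semantics: fall back to the default, then clamp into [lo, hi].
        try:
            val = int(kw.get(key, default))
        except (TypeError, ValueError):
            val = default
        return max(lo, min(hi, val))

    print(parse_int({}, "SIDES", 3, 3, 512))               # 3 (default)
    print(parse_int({"SIDES": 1024}, "SIDES", 3, 3, 512))  # 512 (clamped)
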
2 changes: 1 addition & 1 deletion core/create_glsl.py
@@ -114,4 +114,4 @@ def run(self, ident, **kw) -> List[torch.Tensor]:

self.__last_good = images
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]
3 changes: 0 additions & 3 deletions core/device_midi.py
@@ -121,7 +121,6 @@ def run(self, **kw) -> Tuple[MIDIMessage, bool, int, int, int, int, float]:
self.__device = device
normalize = self.__value / 127.
msg = MIDIMessage(self.__note_on, self.__channel, self.__control, self.__note, self.__value)
- print(msg)
return msg, self.__note_on, self.__channel, self.__control, self.__note, self.__value, normalize,

class MIDIFilterEZNode(JOVBaseNode):
@@ -249,7 +248,6 @@ def __filter(self, data:int, value:str) -> bool:

def run(self, **kw) -> Tuple[bool]:
message: MIDIMessage = parse_param(kw, Lexicon.MIDI, EnumConvertType.ANY, None)
- print(message)
note_on = parse_param(kw, Lexicon.ON, EnumConvertType.STRING, MIDINoteOnFilter.IGNORE.name)
chan = parse_param(kw, Lexicon.CHANNEL, EnumConvertType.STRING, "")
ctrl = parse_param(kw, Lexicon.CONTROL, EnumConvertType.STRING, "")
@@ -260,7 +258,6 @@ def run(self, **kw) -> Tuple[bool]:
results = []
pbar = ProgressBar(len(params))
for idx, (message, note_on, chan, ctrl, note, value, normal) in enumerate(params):
- print(message)
note_on = MIDINoteOnFilter[note_on]
if note_on != MIDINoteOnFilter.IGNORE:
if note_on == "TRUE" and message.note_on != True:
2 changes: 1 addition & 1 deletion core/device_stream.py
@@ -256,7 +256,7 @@ def run(self, **kw) -> Tuple[torch.Tensor, torch.Tensor]:

if len(images) == 0:
images.append(self.__empty)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]

class StreamWriterNode(JOVBaseNode):
NAME = "STREAM WRITER (JOV) 🎞️"
3 changes: 1 addition & 2 deletions core/utility.py
@@ -399,7 +399,6 @@ def run(self, ident, **kw) -> Tuple[torch.Tensor]:
self.__history = []
longest_edge = 0
dynamic = parse_dynamic(kw, Lexicon.UNKNOWN, EnumConvertType.FLOAT, 0)
- print(dynamic)
# each of the plugs
self.__ax.clear()
for idx, val in enumerate(dynamic):
@@ -457,7 +456,7 @@ def run(self, **kw) -> None:
pA = channel_solid(w, h)
images.append(cv2tensor_full(pA, matte))
pbar.update_absolute(idx)
- return [torch.cat(i, dim=0) for i in list(zip(*images))]
+ return [torch.cat(i, dim=0) for i in zip(*images)]
'''

class QueueNode(JOVBaseNode):
25 changes: 17 additions & 8 deletions sup/image.py
@@ -565,14 +565,23 @@ def channel_solid(width:int=MIN_IMAGE_SIZE, height:int=MIN_IMAGE_SIZE, color:TYP
return np.full((height, width, 4), color, dtype=np.uint8)

def channel_merge(channel:List[TYPE_IMAGE]) -> TYPE_IMAGE:
- ch = [c.shape[:2] if c is not None else (0, 0) for c in channel[:3]]
- w = max([c[1] for c in ch])
- h = max([c[0] for c in ch])
- ch = [np.zeros((h, w), dtype=np.uint8) if c is None else cv2.resize(c, (h, w)) for c in channel[:3]]
- if len(channel) == 4:
-     a = channel[3] if len(channel) == 4 else np.full((h, w), 255, dtype=np.uint8)
-     ch.append(a)
- return cv2.merge(ch)
+ ch_sizes = [c.shape[:2] if c is not None else (0, 0) for c in channel]
+ ch_sizes.append([MIN_IMAGE_SIZE, MIN_IMAGE_SIZE])
+ max_width = max([c[1] for c in ch_sizes])
+ max_height = max([c[0] for c in ch_sizes])
+ img = channel_solid(max_width, max_height, chan=EnumImageType.BGRA)
+ for i, ch in enumerate(channel):
+     if ch is None:
+         continue
+     if ch.shape[:2] != (max_height, max_width):
+         ch = cv2.resize(ch, (max_width, max_height))
+     if ch.ndim > 2:
+         ch = ch[:,:,0]
+     img[:,:,i] = ch
+
+ if len(channel) == 3:
+     img = img[:, :, :3]
+ return img

def channel_swap(imageA:TYPE_IMAGE, swap_ot:EnumPixelSwizzle,
imageB:TYPE_IMAGE, swap_in:EnumPixelSwizzle) -> TYPE_IMAGE:
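
Note on the rewritten channel_merge: it now sizes a solid BGRA canvas to the largest input plane (floored at MIN_IMAGE_SIZE), resizes mismatched planes with the (width, height) argument order that cv2.resize actually expects (the old cv2.resize(c, (h, w)) call had it backwards), and drops alpha when only three planes are passed. A self-contained sketch of that behavior, with MIN_IMAGE_SIZE and a zeroed stand-in for the channel_solid canvas declared locally so it runs on its own:

    import cv2
    import numpy as np

    MIN_IMAGE_SIZE = 32  # stand-in for the repo's constant

    def channel_merge_sketch(channel: list) -> np.ndarray:
        sizes = [c.shape[:2] if c is not None else (0, 0) for c in channel]
        sizes.append((MIN_IMAGE_SIZE, MIN_IMAGE_SIZE))
        h = max(s[0] for s in sizes)
        w = max(s[1] for s in sizes)
        img = np.zeros((h, w, 4), dtype=np.uint8)  # solid BGRA canvas
        for i, ch in enumerate(channel):
            if ch is None:
                continue  # missing planes keep the canvas value
            if ch.shape[:2] != (h, w):
                ch = cv2.resize(ch, (w, h))  # cv2 wants (width, height)
            if ch.ndim > 2:
                ch = ch[:, :, 0]  # keep a single plane from multi-channel input
            img[:, :, i] = ch
        return img[:, :, :3] if len(channel) == 3 else img

    b = np.full((16, 16), 255, dtype=np.uint8)
    g = np.full((64, 48), 128, dtype=np.uint8)
    print(channel_merge_sketch([b, g, None]).shape)  # (64, 48, 3)
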
3 changes: 1 addition & 2 deletions sup/util.py
@@ -202,8 +202,7 @@ def parse_value(val:Any, typ:EnumConvertType, default: Any,
cc, h, w = new_val.shape
if cc > 1:
weights = [0.2989, 0.5870, 0.1140]
- new_val = np.dot(new_val[..., :3], weights)
- new_val = new_val.reshape(512, 512, 1)
+ new_val = torch.dot(new_val[..., :3], weights)[:,:,0]

if typ == EnumConvertType.COORD2D:
new_val = {'x': new_val[0], 'y': new_val[1]}
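
Note on the parse_value fix: the old code reshaped every grayscale reduction to a hard-coded 512x512, while the new line keeps the image's own size; the weights [0.2989, 0.5870, 0.1140] are the standard BT.601 luma coefficients. A minimal numpy sketch of that weighting on an HWC image; the committed line uses torch on the node's own tensor layout, so treat this as the intent rather than the exact call:

    import numpy as np

    img = np.random.rand(4, 4, 3)  # HWC, hypothetical input
    weights = [0.2989, 0.5870, 0.1140]  # BT.601 luma coefficients
    gray = np.dot(img[..., :3], weights)  # weighted sum over the channel axis
    print(gray.shape)  # (4, 4)
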