Skip to content

Commit

Permalink
Add support for more mixed Complex-Real operations
Browse files Browse the repository at this point in the history
This change adds support for (almost all of) the same mixed Complex-Real operations that Nim's complex module supports.

This also changes a bit some of the existing mixed ops. In particular, we used to support mixed ops of Complex64 with _any_ kind of number (including integers) but they did not support Complex32 - float32 ops. While being able to mix Complex64 with ints (for example) was nice, it was not consistent with nim's own complex library and also it would have required adding many more operator overloads, so I decided to just be consistent at the cost of a little usability in a small number of cases.
  • Loading branch information
AngelEzquerra committed Mar 26, 2024
1 parent d6acd3a commit 3147a4f
Show file tree
Hide file tree
Showing 3 changed files with 301 additions and 59 deletions.
74 changes: 74 additions & 0 deletions src/arraymancer/tensor/operators_blas_l1.nim
Original file line number Diff line number Diff line change
Expand Up @@ -45,21 +45,45 @@ proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Ten
## Tensor addition
map2_inline(a, b, x + y)

proc `+`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise addition of a complex tensor and a real tensor
  ## sharing the same component type `T`.
  result = map2_inline(a, b, x + y)

proc `+`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise addition of a real tensor and a complex tensor
  ## sharing the same component type `T`.
  result = map2_inline(a, b, x + y)

proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noinit.} =
  ## Element-wise tensor subtraction.
  result = map2_inline(a, b, x - y)

proc `-`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise subtraction of a real tensor from a complex tensor
  ## sharing the same component type `T`.
  result = map2_inline(a, b, x - y)

proc `-`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise subtraction of a complex tensor from a real tensor
  ## sharing the same component type `T`.
  result = map2_inline(a, b, x - y)

# #########################################################
# # Tensor-Tensor in-place linear algebra

proc `+=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
  ## In-place element-wise tensor addition (`a[i] += b[i]`).
  apply2_inline(a, b, x + y)

proc `+=`*[T: SomeNumber](a: var Tensor[Complex[T]], b: Tensor[T]) =
  ## In-place addition of a real tensor into a complex tensor
  ## sharing the same component type `T`.
  apply2_inline(a, b, x + y)

proc `-=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
  ## In-place element-wise tensor subtraction (`a[i] -= b[i]`).
  apply2_inline(a, b, x - y)

proc `-=`*[T: SomeNumber](a: var Tensor[Complex[T]], b: Tensor[T]) =
  ## In-place subtraction of a real tensor from a complex tensor
  ## sharing the same component type `T`.
  apply2_inline(a, b, x - y)

# #########################################################
# # Tensor-scalar linear algebra

Expand All @@ -68,10 +92,28 @@ proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: T, t: Tensor[T]):
returnEmptyIfEmpty(t)
t.map_inline(x * a)

proc `*`*[T: SomeNumber](a: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
  ## Multiply every element of a complex tensor by a real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(a * x)

proc `*`*[T: SomeNumber](a: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
  ## Multiply every element of a real tensor by a complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(a * x)

proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
  ## Multiply every element of a tensor by a scalar
  ## (delegates to the scalar-first overload).
  result = a * t

proc `*`*[T: SomeNumber](t: Tensor[Complex[T]], a: T): Tensor[Complex[T]] {.noinit.} =
  ## Multiply every element of a complex tensor by a real scalar
  ## (delegates to the scalar-first overload).
  result = a * t

proc `*`*[T: SomeNumber](t: Tensor[T], a: Complex[T]): Tensor[Complex[T]] {.noinit.} =
  ## Multiply every element of a real tensor by a complex scalar
  ## (delegates to the scalar-first overload).
  result = a * t

proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
## Element-wise division by a float scalar
returnEmptyIfEmpty(t)
Expand All @@ -80,6 +122,26 @@ proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T):
else:
t.map_inline(x / a)

proc `/`*[T: SomeNumber](a: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
  ## Divide a real scalar by each element of a complex tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(a / x)

proc `/`*[T: SomeNumber](a: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
  ## Divide a complex scalar by each element of a real tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(a / x)

proc `/`*[T: SomeNumber](t: Tensor[Complex[T]], a: T): Tensor[Complex[T]] {.noinit.} =
  ## Divide each element of a complex tensor by a real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x / a)

proc `/`*[T: SomeNumber](t: Tensor[T], a: Complex[T]): Tensor[Complex[T]] {.noinit.} =
  ## Divide each element of a real tensor by a complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x / a)

proc `div`*[T: SomeInteger](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
## Element-wise division by an integer
returnEmptyIfEmpty(t)
Expand Down Expand Up @@ -123,12 +185,24 @@ proc `*=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], a:
return
t.apply_inline(x * a)

proc `*=`*[T: SomeNumber](t: var Tensor[Complex[T]], a: T) =
  ## In-place multiplication of a complex tensor by a real scalar.
  ## No-op on an empty tensor.
  if t.size != 0:
    t.apply_inline(x * a)

proc `/=`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: var Tensor[T], a: T) =
  ## In-place element-wise division of a tensor by a scalar.
  ## No-op on an empty tensor.
  if t.size != 0:
    t.apply_inline(x / a)

proc `/=`*[T: SomeNumber](t: var Tensor[Complex[T]], a: T) =
  ## In-place division of a complex tensor by a real scalar.
  ## No-op on an empty tensor.
  if t.size != 0:
    t.apply_inline(x / a)

proc `/=`*[T: SomeInteger](t: var Tensor[T], a: T) =
## Element-wise division by a scalar (in-place)
if t.size == 0:
Expand Down
238 changes: 180 additions & 58 deletions src/arraymancer/tensor/operators_broadcasted.nim
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,13 @@ proc `/.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Te
else:
result = map2_inline(tmp_a, tmp_b, x / y )

proc `^.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noinit.} =
  ## Broadcasted element-wise exponentiation.
  ##
  ## Both tensors are first broadcast to a common shape, then
  ## `pow` is applied element by element.
  let (bcastA, bcastB) = broadcast2(a, b)
  result = map2_inline(bcastA, bcastB, pow(x, y))

proc `mod`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.} =
## Tensor element-wise modulo operation
##
Expand Down Expand Up @@ -195,81 +202,196 @@ proc `/.=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], v


# #################################################
# # Mixed Complex64 tensor - real scalar operations
# # Mixed complex - real tensor operations
#
# It is always possible to operate on a Complex64 tensor with a real scalar
# without loss of precission, so we allow it without explicit type conversion.
# This makes complex tensor arithmetic more convenient to use.
# Since nim's built-in complex module supports mixed complex-real operations
# we allow them too (but in tensor form). This makes such mixed arithmetic
# more efficient in addition to more convenient to use.

proc `+.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted addition for real scalar + Complex64 tensor
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
complex_val +. t
proc `+.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit,inline.} =
  ## Element-wise addition of a complex tensor and a real tensor.
  ##
  ## NOTE(review): unlike the same-type `+.`, no broadcast2 call is made and
  ## the shape check below is commented out — confirm the intended behavior
  ## for mismatched shapes.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, x + y)

proc `+.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted addition for real scalar + Complex64 tensor
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
t +. complex_val
proc `+.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit,inline.} =
  ## Element-wise addition of a real tensor and a complex tensor.
  ##
  ## NOTE(review): no broadcast2 call is made and the shape check below is
  ## commented out — confirm the intended behavior for mismatched shapes.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, x + y)

proc `-.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted subtraction for real scalar - Complex64 tensor
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
complex_val -. t
proc `-.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit,inline.} =
  ## Element-wise subtraction of a real tensor from a complex tensor.
  ##
  ## NOTE(review): no broadcast2 call is made and the shape check below is
  ## commented out — confirm the intended behavior for mismatched shapes.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, x - y)

proc `-.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted subtraction for real scalar - Complex64 tensor
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
t -. complex_val
proc `-.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit,inline.} =
  ## Element-wise subtraction of a complex tensor from a real tensor.
  ##
  ## NOTE(review): no broadcast2 call is made and the shape check below is
  ## commented out — confirm the intended behavior for mismatched shapes.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, x - y)

proc `*.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted multiplication for real scalar * Complex64 tensor
proc `*.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
## Element-wise multiplication (Hadamard product).
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
complex_val *. t
## And broadcasted element-wise multiplication.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
result = map2_inline(a, b, x * y)

proc `*.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted multiplication for real scalar * Complex64 tensor
proc `*.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
## Element-wise multiplication (Hadamard product).
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
t *. complex_val
## And broadcasted element-wise multiplication.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
result = map2_inline(a, b, x * y)

proc `/.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted division for real scalar / Complex64 tensor
proc `/.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
## Tensor element-wise division
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
complex_val /. t
## And broadcasted element-wise division.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
when T is SomeInteger:
result = map2_inline(a, b, x div y )
else:
result = map2_inline(a, b, x / y )

proc `/.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted division for real scalar / Complex64 tensor
proc `/.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
## Tensor element-wise division
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
t /. complex_val
## And broadcasted element-wise division.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
when T is SomeInteger:
result = map2_inline(a, b, x div y )
else:
result = map2_inline(a, b, x / y )

proc `^.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted exponentiation for real scalar ^ Complex64 tensor
proc `^.`*[T: SomeFloat](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise exponentiation: complex tensor ^ real tensor.
  ## Each real exponent is converted to complex before calling `pow`.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, pow(x, complex(y)))

proc `^.`*[T: SomeFloat](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
  ## Element-wise exponentiation: real tensor ^ complex tensor.
  ## Each real base is converted to complex before calling `pow`.
  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
  result = map2_inline(a, b, pow(complex(x), y))

proc `mod`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
## Tensor element-wise modulo operation
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
complex_val ^. t
## And broadcasted element-wise modulo operation.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
result = map2_inline(a, b, x mod y)

proc `^.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
## Broadcasted exponentiation for real scalar ^ Complex64 tensor
proc `mod`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
## Tensor element-wise modulo operation
##
## The scalar is automatically converted to Complex64 before the operation.
let complex_val = complex(float64(val))
t ^. complex_val
## And broadcasted element-wise modulo operation.
#check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
result = map2_inline(a, b, x mod y)

# #################################################
# # Mixed complex tensor - real scalar operations
#
# Since nim's built-in complex module supports mixed complex-real operations
# we allow them too (but in tensor form). This makes such mixed arithmetic
# more efficient in addition to more convenient to use.

proc `+.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted addition: real scalar + complex tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val + x)

proc `+.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted addition: complex scalar + real tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val + x)

proc `+.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted addition: complex tensor + real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x + val)

proc `+.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted addition: real tensor + complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x + val)

proc `-.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted subtraction: real scalar - complex tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val - x)

proc `-.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted subtraction: complex scalar - real tensor.
  ##
  ## Returns a complex tensor whose elements are `val - t[i]`.
  returnEmptyIfEmpty(t)
  # BUG FIX: was `val + x`, which performed addition instead of subtraction,
  # inconsistent with every other `-.` overload in this section.
  result = t.map_inline(val - x)

proc `-.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted subtraction: complex tensor - real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x - val)

proc `-.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted subtraction: real tensor - complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x - val)

proc `*.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted multiplication: real scalar * complex tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val * x)

proc `*.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted multiplication: complex scalar * real tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val * x)

proc `*.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted multiplication: complex tensor * real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x * val)

proc `*.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted multiplication: real tensor * complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x * val)

proc `/.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted division: real scalar / complex tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val / x)

proc `/.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted division: complex scalar / real tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(val / x)

proc `/.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted division: complex tensor / real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x / val)

proc `/.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted division: real tensor / complex scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(x / val)

proc `^.`*[T: SomeFloat](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted exponentiation: real scalar ^ complex tensor.
  ## The real base is converted to complex before calling `pow`.
  returnEmptyIfEmpty(t)
  result = t.map_inline(pow(complex(val), x))

proc `^.`*[T: SomeFloat](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted exponentiation: complex scalar ^ real tensor.
  returnEmptyIfEmpty(t)
  result = t.map_inline(pow(val, x))

proc `^.`*[T: SomeFloat](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted exponentiation: complex tensor ^ real scalar.
  returnEmptyIfEmpty(t)
  result = t.map_inline(pow(x, val))

proc `^.`*[T: SomeFloat](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
  ## Broadcasted exponentiation: real tensor ^ complex scalar.
  ## The real base is converted to complex before calling `pow`.
  returnEmptyIfEmpty(t)
  result = t.map_inline(pow(complex(x), val))

proc `+.=`*[T: SomeNumber](t: var Tensor[Complex64], val: T) {.inline.} =
## Complex64 tensor in-place addition with a real scalar.
Expand Down
Loading

0 comments on commit 3147a4f

Please sign in to comment.