#include <cmath>
#include <iostream>

#include <ATen/Dispatch.h>
#include <ATen/Parallel.h>
#include <ATen/cpu/vec256/vec256.h>
#include <ATen/cpu/vec256/functional.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/cpu/Loops.h>

namespace at { namespace native {
namespace {

using namespace vec256;
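
// Computes a + alpha * b elementwise. Bool tensors take the plain scalar
// path; every other dtype (including BFloat16) dispatches to cpu_kernel_vec,
// using a Vec256 fused multiply-add in the vectorized lambda.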
void add_kernel(TensorIterator& iter, Scalar alpha_scalar) {
  if (iter.dtype() == ScalarType::Bool) {
    auto alpha = alpha_scalar.to<bool>();
    cpu_kernel(iter, [=](bool a, bool b) -> bool { return a + b * alpha; });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kBFloat16, iter.dtype(), "add_cpu/sub_cpu", [&]() {
      auto alpha = alpha_scalar.to<scalar_t>();
      auto alpha_vec = Vec256<scalar_t>(alpha);
      cpu_kernel_vec(iter,
        [=](scalar_t a, scalar_t b) -> scalar_t { return a + alpha * b; },
        [=](Vec256<scalar_t> a, Vec256<scalar_t> b) {
          return vec256::fmadd(b, alpha_vec, a);
        });
    });
  }
}
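
// Elementwise atan2(a, b) for floating-point dtypes: std::atan2 in the scalar
// lambda, Vec256::atan2 in the vectorized lambda.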
void atan2_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "atan2_cpu", [&]() {
    cpu_kernel_vec(iter,
      [=](scalar_t a, scalar_t b) -> scalar_t {
        return std::atan2(a, b);
      },
      [=](Vec256<scalar_t> a, Vec256<scalar_t> b) {
        return a.atan2(b);
      });
  });
}
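
// Subtraction reuses add_kernel with the scalar negated: a - alpha * b is
// computed as a + (-alpha) * b.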
void sub_kernel(TensorIterator& iter, Scalar alpha_scalar) {
  add_kernel(iter, -alpha_scalar);
}
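
// Elementwise multiplication. Bool tensors use logical AND; every other dtype
// (including BFloat16) multiplies in both the scalar and the Vec256 path.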
void mul_kernel(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    cpu_kernel(iter, [=](bool a, bool b) -> bool { return a && b; });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kBFloat16, iter.dtype(), "mul_cpu", [&]() {
      cpu_kernel_vec(iter,
        [=](scalar_t a, scalar_t b) -> scalar_t { return a * b; },
        [=](Vec256<scalar_t> a, Vec256<scalar_t> b) {
          return a * b;
        });
    });
  }
}
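
// Elementwise division. Integral dtypes use the non-vectorized scalar kernel;
// floating-point dtypes (including BFloat16) also get a Vec256 path.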
void div_kernel(TensorIterator& iter) {
  if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
    // There's no SIMD integer division, so don't try to vectorize it.
    // TODO: if the divisor is a scalar, rewrite as multiplication by a constant.
    AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "div_cpu", [&]() {
      cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
        return a / b;
      });
    });
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.dtype(), "div_cpu", [&]() {
      cpu_kernel_vec(iter,
        [=](scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
          return a / b;
        },
        [=](Vec256<scalar_t> a, Vec256<scalar_t> b) {
          return a / b;
        });
    });
  }
}
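
// Logical XOR with potentially different input and output dtypes: the nested
// dispatches bind the first input (self_t), the second input (other_t), and
// the output type, then compare the inputs' truth values for inequality.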
void logical_xor_kernel(TensorIterator& iter) {
  AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_xor_cpu", [&]() {
    using self_t = scalar_t;
    AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(2), "logical_xor_cpu", [&]() {
      using other_t = scalar_t;
      AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_xor_cpu", [&]() {
        cpu_kernel(iter,
          [](self_t a, other_t b) -> scalar_t {
            return static_cast<scalar_t>(bool(a) != bool(b));
          });
      });
    });
  });
}
} // anonymous namespace

REGISTER_DISPATCH(add_stub, &add_kernel);
REGISTER_DISPATCH(sub_stub, &sub_kernel);
REGISTER_DISPATCH(mul_stub, &mul_kernel);
REGISTER_DISPATCH(div_stub, &div_kernel);
REGISTER_DISPATCH(atan2_stub, &atan2_kernel);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel);

}} // namespace at::native