ComplexKernel.cu (from pytorch/pytorch)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
namespace {
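// Backs torch.complex on CUDA: builds a complex tensor elementwise from
// separate real and imaginary inputs.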
void complex_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.input_dtype(0), "complex_cuda", [&]() {
    gpu_kernel(
        iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
          return c10::complex<scalar_t>(a, b);
        });
  });
}
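
// Backs torch.polar on CUDA: builds a complex tensor elementwise from
// magnitude (abs) and phase (angle) inputs, i.e. abs * exp(i * angle).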
void polar_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES(iter.input_dtype(0), "polar_cuda", [&]() {
    gpu_kernel(
        iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex<scalar_t> {
          return c10::complex<scalar_t>(a * std::cos(b), a * std::sin(b));
        });
  });
}
} // anonymous namespace
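
// Register the CUDA kernels with the dispatch stubs consumed by the
// device-generic at::complex / at::polar implementations.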
REGISTER_DISPATCH(complex_stub, &complex_kernel_cuda);
REGISTER_DISPATCH(polar_stub, &polar_kernel_cuda);
} // namespace at::native
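
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the upstream file): how these
// kernels surface through the ATen C++ API on a CUDA build, assuming
// at::complex and at::polar dispatch to the stubs registered above.
//
//   #include <ATen/ATen.h>
//
//   auto real  = at::tensor({1.0f, 0.0f}, at::device(at::kCUDA));
//   auto imag  = at::tensor({0.0f, 1.0f}, at::device(at::kCUDA));
//   auto z = at::complex(real, imag);   // complex float: 1+0i, 0+1i
//
//   auto abs   = at::tensor({1.0f, 2.0f}, at::device(at::kCUDA));
//   auto angle = at::tensor({0.0f, 1.57f}, at::device(at::kCUDA));
//   auto p = at::polar(abs, angle);     // abs * exp(i * angle)
// ---------------------------------------------------------------------------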