-
Notifications
You must be signed in to change notification settings - Fork 14
/
run.py
43 lines (31 loc) · 1.5 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
#!/usr/bin/env python
import torch
import hadamard # the custom layer
##########################################################
class Network(torch.nn.Module):
    """Minimal module wrapping the custom Hadamard autograd function.

    Forwarding two tensors through this module returns their element-wise
    product as computed by the ``hadamard`` CUDA extension, so autograd
    exercises the extension's custom backward pass.
    """

    def __init__(self):
        super().__init__()

    def forward(self, tenOne, tenTwo):
        # Delegate straight to the custom autograd Function from the extension.
        return hadamard.hadamard_func.apply(tenOne, tenTwo)
netNetwork = Network().cuda()

# Sanity-check the custom layer against torch.mul, and verify its backward
# pass numerically with gradcheck, a few times with fresh random inputs.
for intIter in range(3):
    # requires_grad_() so gradcheck can probe gradients w.r.t. both inputs.
    tenOne = torch.rand(size=[2, 3, 8, 8], dtype=torch.float32, device=torch.device('cuda')).requires_grad_()
    tenTwo = torch.rand(size=[2, 3, 8, 8], dtype=torch.float32, device=torch.device('cuda')).requires_grad_()

    tenOut = netNetwork(tenOne, tenTwo)
    tenExpected = torch.mul(tenOne, tenTwo)

    # .detach() replaces the deprecated .data attribute for a grad-free view.
    print((tenOut.detach() - tenExpected.detach()).abs().sum(), '<-- should be 0.0')
    print(torch.autograd.gradcheck(func=netNetwork, inputs=(tenOne, tenTwo), eps=0.001), '<-- should be true')
# end
print('switching to DataParallel mode')

# Repeat the same correctness and gradcheck verification with the module
# wrapped in DataParallel, to confirm the extension works when replicated.
netNetwork = torch.nn.DataParallel(Network()).cuda()

for intIter in range(3):
    # requires_grad_() so gradcheck can probe gradients w.r.t. both inputs.
    tenOne = torch.rand(size=[2, 3, 8, 8], dtype=torch.float32, device=torch.device('cuda')).requires_grad_()
    tenTwo = torch.rand(size=[2, 3, 8, 8], dtype=torch.float32, device=torch.device('cuda')).requires_grad_()

    tenOut = netNetwork(tenOne, tenTwo)
    tenExpected = torch.mul(tenOne, tenTwo)

    # .detach() replaces the deprecated .data attribute for a grad-free view.
    print((tenOut.detach() - tenExpected.detach()).abs().sum(), '<-- should be 0.0')
    print(torch.autograd.gradcheck(func=netNetwork, inputs=(tenOne, tenTwo), eps=0.001), '<-- should be true')
# end