[Zero-Dim] support dist/cov/det output 0D #52857

Merged

Changes from all commits (47 commits)

ecd202e
add 0d support for dist, trace, paddle.linalg.cond test=allcase
GGBond8488 Apr 12, 2023
9f5dfd9
add_0d_output_support_for_det
GGBond8488 Apr 12, 2023
11b8412
test=allcase
GGBond8488 Apr 12, 2023
fe49c1b
support_0d_output_for_linalg.norm
GGBond8488 Apr 13, 2023
dc4332c
support linalg.norm 0d output, test=allcase
GGBond8488 Apr 14, 2023
667e7a1
fix 0D test
GGBond8488 Apr 16, 2023
df25e31
fix zero dim test, test=allcase
GGBond8488 Apr 16, 2023
475bf2f
fix 0D test
GGBond8488 Apr 16, 2023
032e3fb
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
GGBond8488 Apr 17, 2023
6142d94
fix tets,test=allcase
GGBond8488 Apr 17, 2023
6e70adf
fix error,test=allcase
GGBond8488 Apr 17, 2023
dd61c63
fix errors ,test=allcase
GGBond8488 Apr 17, 2023
035cadf
add static backward , test=allcase
GGBond8488 Apr 17, 2023
e5146f1
add static backwward test, test=allcase
GGBond8488 Apr 17, 2023
13dfb05
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
GGBond8488 Apr 17, 2023
3e87da3
fix conflicts
GGBond8488 Apr 18, 2023
bdf944b
fix pr-ci-build error;test=document_fix (#53060)
tianshuo78520a Apr 19, 2023
00b7c81
[Cherry-Pick] Unique support float16&bfloat16 (#53023)
ZzSean Apr 19, 2023
a8e3aa3
slogdet_support_0D_output
GGBond8488 Apr 17, 2023
3ec138d
add new case
GGBond8488 Apr 20, 2023
97126d2
merge develop
GGBond8488 Apr 20, 2023
e3e2abf
fix tests, test=allcase
GGBond8488 Apr 20, 2023
374a371
merge norm
GGBond8488 Apr 20, 2023
58cd314
merge
GGBond8488 Apr 20, 2023
13928ce
merge
GGBond8488 Apr 20, 2023
cf133b4
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
GGBond8488 Apr 20, 2023
21235c7
fix p_norm related test, test=allcase
GGBond8488 Apr 20, 2023
40f7bb3
fix some err, test=allcase
GGBond8488 Apr 21, 2023
6fab1c7
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
GGBond8488 Apr 21, 2023
f2d14a6
fix conflicts
GGBond8488 Apr 21, 2023
f304d4c
test=allcase
GGBond8488 Apr 21, 2023
8dca1eb
move out trace
GGBond8488 Apr 22, 2023
8ed5522
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
GGBond8488 Apr 23, 2023
ec56e5f
open some case, test=allcase
GGBond8488 Apr 24, 2023
6269092
open some case, test=allcase
GGBond8488 Apr 24, 2023
fd10479
fix conflicts, test=allcase
GGBond8488 Apr 24, 2023
55b8046
Merge branch 'add_0d_output_support_for_det' of github.com:GGBond8488…
GGBond8488 Apr 24, 2023
b832793
fix norm all case, test=allcase
GGBond8488 Apr 26, 2023
fff8e6e
merge develop
GGBond8488 Apr 27, 2023
927e3ec
fix some test error, test=allcase
GGBond8488 Apr 27, 2023
9c9d8c5
fix typro,test=allcase
GGBond8488 Apr 27, 2023
2400312
fix test err, test=allcase
GGBond8488 Apr 27, 2023
7f72ff0
test=allcase
GGBond8488 Apr 27, 2023
254c31f
test
GGBond8488 Apr 27, 2023
14c3de2
fix test error, test=allcase
GGBond8488 Apr 28, 2023
d05cd30
fix test error, test=allcase
GGBond8488 Apr 28, 2023
16a552d
fallback norm, test=allcase
GGBond8488 Apr 28, 2023
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/binary.cc
@@ -990,7 +990,7 @@ void DistInferMeta(const MetaTensor& x,
"The Input(Y) has not been initialized properly. The "
"shape of Input(Y) = [%s].",
y_dims));
- out->set_dims({1});
+ out->set_dims(phi::make_ddim({}));
out->set_dtype(x.dtype());
}
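
For reference, a minimal eager-mode sketch (not part of this diff) of the user-visible effect of the DistInferMeta change: paddle.dist now returns a 0-D tensor rather than a shape-[1] tensor.

import paddle

# Sketch only: illustrates the new 0-D output contract.
x = paddle.to_tensor([[3.0, 3.0], [3.0, 3.0]])
y = paddle.to_tensor([[3.0, 3.0], [3.0, 1.0]])
out = paddle.dist(x, y)  # Euclidean distance, value 2.0
print(out.shape)         # [] after this PR (previously [1])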

1 change: 0 additions & 1 deletion paddle/phi/infermeta/unary.cc
@@ -2767,7 +2767,6 @@ void PNormInferMeta(const MetaTensor& x,
if (reduce_dims.size() == 0) {
reduce_dims.emplace_back(1);
}

x_dim[axis] = 1;
}

4 changes: 2 additions & 2 deletions paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
@@ -91,10 +91,10 @@ void DeterminantGradKernel(const Context& dev_ctx,
" input tensor's, but here differ %d",
input_dims_size - out_grad.dims().size()));
} else if (input_dims_size == 2) {
- // input dims size 2 and grad dims size 1 is possible
+ // input dims size 2 and grad dims size 0 is possible
PADDLE_ENFORCE_EQ(
out_grad.dims().size(),
- 1,
+ 0,
phi::errors::InvalidArgument(
"The grad tensor of det dims size should be 2 less than"
" input tensor's, but here differ %d",
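
For reference, a minimal eager-mode sketch (not part of this diff) of the shape contract the grad kernel now enforces: the incoming out_grad has the shape of det's output, so for a 2-D input it is 0-D, i.e. its rank is exactly 2 less than the input's.

import paddle

# Sketch only: 2-D input -> 0-D det output, so backward receives a 0-D out_grad.
x2d = paddle.randn([3, 3])
x2d.stop_gradient = False
paddle.linalg.det(x2d).backward()   # 0-D output needs no explicit grad tensor
print(x2d.grad.shape)               # [3, 3]

# Batched 3-D input -> 1-D det output (rank 3 - 2 = 1).
x3d = paddle.randn([2, 3, 3])
x3d.stop_gradient = False
paddle.linalg.det(x3d).sum().backward()
print(x3d.grad.shape)               # [2, 3, 3]
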
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/determinant_kernel_impl.h
@@ -126,7 +126,7 @@ void DeterminantKernel(const Context& dev_ctx,
out->Resize(output_dims);
} else {
// when input is a two-dimension matrix, The det value is a number.
- out->Resize({1});
+ out->Resize(phi::make_ddim({}));
}
VLOG(10) << "output dim:" << out->dims();
}
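
For reference, a minimal sketch (not part of this diff) of the resulting Python-level behaviour: the determinant of a single 2-D matrix is now a 0-D tensor, and its value is unchanged.

import numpy as np
import paddle

# Sketch only: the single-matrix determinant is a scalar, represented as 0-D.
a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
out = paddle.linalg.det(paddle.to_tensor(a))
print(out.shape)  # [] after this PR (previously [1])
np.testing.assert_allclose(out.numpy(), np.linalg.det(a), rtol=1e-5)
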
243 changes: 243 additions & 0 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -2546,6 +2546,107 @@ def test_multi_dot(self):
self.assertEqual(b.grad.shape, [4, 5])
self.assertEqual(c.grad.shape, [5])

def test_cov(self):
xt = paddle.randn((3, 4))
xt.stop_gradient = False
xt_1 = paddle.randn((12,))
xt_1.stop_gradient = False

xt_out = paddle.linalg.cov(xt)
xt_out.retain_grads()
xt_out.backward()
self.assertEqual(xt_out.shape, [3, 3])
self.assertEqual(xt.grad.shape, [3, 4])

xt_1_out = paddle.linalg.cov(xt_1)
xt_1.retain_grads()
xt_1_out.backward()
self.assertEqual(xt_1_out.shape, [])
self.assertEqual(xt_1.grad.shape, [12])

def test_det(self):
xt = paddle.randn([3, 3, 3])
xt.stop_gradient = False
xt_1 = paddle.randn([3, 3])
xt_1.stop_gradient = False

xt_out = paddle.linalg.det(xt)
xt.retain_grads()
xt_out.backward()
self.assertEqual(xt_out.shape, [3])
self.assertEqual(xt.grad.shape, [3, 3, 3])

xt_1_out = paddle.linalg.det(xt_1)
xt_1.retain_grads()
xt_1_out.backward()
self.assertEqual(xt_1_out.shape, [])
self.assertEqual(xt_1.grad.shape, [3, 3])

def test_dist(self):
x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32")
x.stop_gradient = False
y.stop_gradient = False
out = paddle.dist(x, y, 0)
out.backward()

self.assertEqual(out.shape, [])
np.testing.assert_allclose(out, np.array(1))
self.assertEqual(x.grad.shape, [2, 2])
self.assertEqual(y.grad.shape, [2, 2])

def test_linalg_cond(self):
def assert_shape(out):
self.assertEqual(out.shape, [])

# x1 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x1.stop_gradient = False
# p = 2 : use paddle.sum
# out = paddle.linalg.cond(x1)
# assert_shape(out)

# p = fro : use paddle.sum
# x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x2.stop_gradient = False
# out_fro = paddle.linalg.cond(x2, p='fro')
# assert_shape(out_fro)

# p = nuc : use paddle.sum
# x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x3.stop_gradient = False
# out_nuc = paddle.linalg.cond(x3, p='nuc')
# assert_shape(out_nuc)

# p in (-1, 1) : use paddle.sum
# x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x4.stop_gradient = False
# out_1 = paddle.linalg.cond(x4, p=1)
# assert_shape(out_1)
# x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x5.stop_gradient = False
# out_minus_1 = paddle.linalg.cond(x5, p=-1)
# assert_shape(out_minus_1)

# p in (-2, 2) depends on paddle.sum
# x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x6.stop_gradient = False
# out_2 = paddle.linalg.cond(x6, p=2)
# assert_shape(out_2)

# p in (-inf, inf):use paddle.sum
# x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x8.stop_gradient = False
# out_inf = paddle.linalg.cond(x8, p=float("inf"))
# assert_shape(out_inf)

# depends on paddle.sum
# a = paddle.randn([2, 4, 4])
Review comment (Contributor): Does this case have any other dependency? If not, it can be enabled; if there is a dependency, it needs to be explained.

# a.stop_gradient = False
# a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.backward()
# self.assertEqual(len(a_cond_fro.shape), 1)
# self.assertEqual(a.grad.shape, [2, 4, 4])

def test_trace(self):
x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
x.stop_gradient = False
@@ -4581,6 +4682,148 @@ def test_multi_dot(self):
self.assertEqual(res[2].shape, (4, 5))
self.assertEqual(res[3].shape, (5,))

@prog_scope()
def test_cov(self):
xt_1 = paddle.randn((12,))
xt_1.stop_gradient = False

out = paddle.linalg.cov(xt_1)
paddle.static.append_backward(out)

prog = paddle.static.default_main_program()

res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (12,))

@prog_scope()
def test_det(self):
xt_1 = paddle.randn((3, 3))
xt_1.stop_gradient = False

out = paddle.linalg.det(xt_1)
paddle.static.append_backward(out.sum())

prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out, xt_1.grad_name])
self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (3, 3))

@prog_scope()
def test_dist(self):
x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
y = paddle.to_tensor([[3, 3], [3, 1]], dtype="float32")
x.stop_gradient = False
y.stop_gradient = False
out = paddle.dist(x, y)
paddle.static.append_backward(out)

prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out, x.grad_name, y.grad_name])

self.assertEqual(res[0].shape, ())
self.assertEqual(res[1].shape, (2, 2))
self.assertEqual(res[2].shape, (2, 2))
np.testing.assert_array_equal(res[0], np.array(2).astype(np.float32))

@prog_scope()
def test_linalg_cond(self):
pass
# use paddle.sum
# x = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x.stop_gradient = False
# out = paddle.linalg.cond(x)
# paddle.static.append_backward(out)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# p = fro : use paddle.sum
# x2 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x2.stop_gradient = False
# out_fro = paddle.linalg.cond(x2, p='fro')
# paddle.static.append_backward(out_fro)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_fro, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# p = nuc : use paddle.sum
# x3 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x3.stop_gradient = False
# out_nuc = paddle.linalg.cond(x3, p='nuc')
# paddle.static.append_backward(out_nuc)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_nuc, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# p in (-1, 1) : use paddle.sum
# x4 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x4.stop_gradient = False
# out_1 = paddle.linalg.cond(x4, p=1)
# paddle.static.append_backward(out_1)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_1, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# x5 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x5.stop_gradient = False
# out_minus_1 = paddle.linalg.cond(x5, p=-1)
# paddle.static.append_backward(out_minus_1)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_minus_1, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# p in (-2, 2) depends on paddle.sum
# x6 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x6.stop_gradient = False
# out_2 = paddle.linalg.cond(x6, p=2)
# paddle.static.append_backward(out_2)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_2, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# p in (-inf, inf):use paddle.sum
# x8 = paddle.to_tensor([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
# x8.stop_gradient = False
# out_inf = paddle.linalg.cond(x8, p=float("inf"))
# paddle.static.append_backward(out_inf)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[out_inf, x.grad_name])

# self.assertTrue(res[0].shape, ())
# self.assertTrue(res[1].shape, (3, 3))

# depends on paddle.sum
# a = paddle.randn([2, 4, 4])
# a.stop_gradient = False
# a_cond_fro = paddle.linalg.cond(a, p='fro')
# paddle.static.append_backward(a_cond_fro)

# prog = paddle.static.default_main_program()
# res = self.exe.run(prog, fetch_list=[a_cond_fro, a.grad_name])

# self.assertEqual(res[0].shape, (2,))
# self.assertEqual(res[1].shape, (2, 4, 4))

@prog_scope()
def test_trace(self):
x = paddle.to_tensor([[3, 2], [1, 9]], dtype="float32")
7 changes: 6 additions & 1 deletion test/autograd/test_orig2prim.py
@@ -463,7 +463,12 @@ def init_data(self):
}

self.orig2prim_args = (X,)
- self.all_ops = ['p_norm', 'reshape_p', 'abs_p', 'reduce_sum_p']
+ self.all_ops = [
+     'p_norm',
+     'reshape_p',
+     'abs_p',
+     'reduce_sum_p',
+ ]
self.out_map = {0: self.output['Out']}


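For context, a rough functional sketch of why exactly these primitive ops are expected; it assumes the test's p_norm is an all-reduce 1-norm, which is not stated in this diff.

import paddle

# Assumption: p_norm lowers to reshape_p -> abs_p -> reduce_sum_p,
# i.e. a 1-norm computed as sum(|reshape(x, [-1])|).
x = paddle.randn([3, 4])
out = paddle.sum(paddle.abs(paddle.reshape(x, [-1])))  # scalar 1-norm of x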