[Lang] [refactor] Deprecate "as_vector=True" in Matrix.to_numpy/to_torch #1046

Merged 36 commits on May 28, 2020.

Commits (36):
9ea24d3
[skip ci] first step
archibate May 24, 2020
53ba74c
[skip ci] sep
archibate May 24, 2020
aad89f8
[skip ci] enforce code format
taichi-gardener May 24, 2020
37ada2b
[skip ci] move methods
archibate May 24, 2020
856d870
[skip ci] fix as_vector
archibate May 24, 2020
b2e43b2
[skip ci] deprecate as_vector
archibate May 24, 2020
b3c2edd
[skip ci] fix test
archibate May 24, 2020
d1acb5a
[skip ci] update examples
archibate May 24, 2020
b5c0c5f
[skip ci] empty_copy
archibate May 24, 2020
accfbeb
[skip ci] enforce code format
taichi-gardener May 24, 2020
9d29718
[skip ci] Merge branch 'master' into vector
archibate May 24, 2020
0c1602a
[skip travis] [skip appveyor] the perfect solution
archibate May 24, 2020
b918063
[skip ci] Merge branch 'master' into vector
archibate May 24, 2020
ebdb474
[skip travis] [skip appveyor] apply @yuanming-hu's suggestion
archibate May 24, 2020
f4ef822
Merge branch 'master' into vector
archibate May 25, 2020
d700745
[skip ci] update doc
archibate May 25, 2020
092bd44
[skip ci] pvc #1051
archibate May 25, 2020
a6a6b56
Merge branch 'master' into vector
archibate May 25, 2020
d2f0e8d
[skip ci] use keep_dims
archibate May 25, 2020
4677ab0
[skip ci] fix test
archibate May 25, 2020
deba49a
[skip ci] really fix
archibate May 25, 2020
1306a0c
[skip ci] fix again!
archibate May 25, 2020
0832e8d
[skip ci] did fix linalg
archibate May 25, 2020
cf31691
Merge branch 'master' into vector
archibate May 26, 2020
85bcaca
[skip ci] app2
archibate May 26, 2020
e014073
[skip ci] fix test
archibate May 26, 2020
4171b77
Merge branch 'master' into vector
archibate May 26, 2020
e1ddf6b
[skip ci] enforce code format
taichi-gardener May 26, 2020
ea4297f
[skip ci] fix matrix n/m order
archibate May 27, 2020
878f0fd
[skip ci] Merge branch 'vector' of github.com:archibate/taichi into v…
archibate May 27, 2020
40164ef
Merge branch 'master' into vector
archibate May 27, 2020
3cb89bc
[skip travis] apply update test
archibate May 28, 2020
e488390
[skip ci] Merge branch 'master' into vector
archibate May 28, 2020
5cd750a
hack Vector attrs
archibate May 28, 2020
acabb91
[skip ci] enforce code format
taichi-gardener May 28, 2020
8682a95
[skip ci] update doc
archibate May 28, 2020
6 changes: 3 additions & 3 deletions docs/external.rst
@@ -41,9 +41,6 @@ Use ``to_numpy``/``from_numpy``/``to_torch``/``from_torch``:
vec.from_numpy(arr)

arr = vec.to_numpy()
assert arr.shape == (n, m, 3, 1)

arr = vec.to_numpy(as_vector=True)
assert arr.shape == (n, m, 3)

# Matrix
@@ -55,6 +52,9 @@ Use ``to_numpy``/``from_numpy``/``to_torch``/``from_torch``:
assert arr.shape == (n, m, 3, 4)


TODO: add API reference


Using external arrays as Taichi kernel parameters
-------------------------------------------------

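The doc change above reflects the new default: for vectors (m == 1), `to_numpy` now drops the trailing dimension of size 1 unless `keep_dims=True` is passed. A minimal sketch of the resulting shapes under the post-PR API (field names are illustrative):

```python
import taichi as ti

ti.init()

n, m = 4, 7
vec = ti.Vector(3, dt=ti.f32, shape=(n, m))     # 3x1 vectors on an n-by-m grid
mat = ti.Matrix(3, 4, dt=ti.f32, shape=(n, m))  # 3x4 matrices on an n-by-m grid

assert vec.to_numpy().shape == (n, m, 3)                   # trailing 1 dropped
assert vec.to_numpy(keep_dims=True).shape == (n, m, 3, 1)  # old default, now opt-in
assert mat.to_numpy().shape == (n, m, 3, 4)                # matrices are unaffected
```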
2 changes: 1 addition & 1 deletion examples/cornell_box.py
@@ -450,7 +450,7 @@ def render():
render()
interval = 10
if i % interval == 0 and i > 0:
img = color_buffer.to_numpy(as_vector=True) * (1 / (i + 1))
img = color_buffer.to_numpy() * (1 / (i + 1))
img = np.sqrt(img / img.mean() * 0.24)
print("{:.2f} samples/s ({} iters, var={})".format(
interval / (time.time() - last_t), i, np.var(img)))
2 changes: 1 addition & 1 deletion examples/mpm88.py
@@ -88,6 +88,6 @@ def substep():
substep()

gui.clear(0x112F41)
pos = x.to_numpy(as_vector=True)
pos = x.to_numpy()
gui.circles(pos, radius=1.5, color=0x068587)
gui.show()
2 changes: 1 addition & 1 deletion examples/quadtree.py
@@ -61,5 +61,5 @@ def vec2_npf32(m):
pos = gui.get_cursor_pos()
action(vec2_npf32(pos))
paint()
gui.set_image(img.to_numpy(as_vector=True))
gui.set_image(img.to_numpy())
gui.show()
2 changes: 1 addition & 1 deletion examples/sdf_renderer.py
@@ -157,7 +157,7 @@ def render():
if i % interval == 0 and i > 0:
print("{:.2f} samples/s".format(interval / (time.time() - last_t)))
last_t = time.time()
img = color_buffer.to_numpy(as_vector=True) * (1 / (i + 1))
img = color_buffer.to_numpy() * (1 / (i + 1))
img = img / img.mean() * 0.24
gui.set_image(np.sqrt(img))
gui.show()
2 changes: 1 addition & 1 deletion examples/stable_fluid.py
@@ -260,7 +260,7 @@ def main():
mouse_data = md_gen(gui)
step(mouse_data)

img = color_buffer.to_numpy(as_vector=True)
img = color_buffer.to_numpy()
gui.set_image(img)
gui.show()

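Every example above migrates the same way; as a before/after sketch (assuming `color_buffer` is a 3-component vector field, as in `cornell_box.py`):

```python
# Before: the trailing dimension of size 1 had to be stripped explicitly.
img = color_buffer.to_numpy(as_vector=True)  # shape (w, h, 3)

# After: m == 1 is dropped by default, so the flag is deprecated and unneeded.
img = color_buffer.to_numpy()                # shape (w, h, 3)

# The old default layout, if ever needed, is now spelled keep_dims=True:
raw = color_buffer.to_numpy(keep_dims=True)  # shape (w, h, 3, 1)
```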
9 changes: 2 additions & 7 deletions python/taichi/lang/__init__.py
@@ -1,5 +1,5 @@
from .impl import *
from .matrix import Matrix
from .matrix import Matrix, Vector
from .transformer import TaichiSyntaxError
from .ndrange import ndrange, GroupedNDRange
from copy import deepcopy as _deepcopy
@@ -16,7 +16,6 @@
ij = indices(0, 1)
ijk = indices(0, 1, 2)
ijkl = indices(0, 1, 2, 3)
Vector = Matrix

outer_product = Matrix.outer_product
cross = Matrix.cross
@@ -180,11 +179,7 @@ def cache_l1(v):
block_dim = core.block_dim
cache = core.cache


def inversed(x):
return x.inversed()


inversed = Matrix.inversed
transposed = Matrix.transposed


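The `__init__.py` change replaces a hand-written wrapper with a direct alias of the unbound method; a short sketch of why the two are equivalent (hypothetical minimal class):

```python
class M:
    def inversed(self):
        return ('inversed', id(self))

# Alias the unbound method, as the diff does with Matrix.inversed.
inversed = M.inversed

m = M()
# ti.inversed(x) is now literally Matrix.inversed(x), i.e. x.inversed().
assert inversed(m) == m.inversed()
```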
138 changes: 67 additions & 71 deletions python/taichi/lang/matrix.py
@@ -20,17 +20,21 @@ def broadcasted(self, other, *args, **kwargs):
class Matrix(TaichiOperations):
is_taichi_class = True

# TODO(archibate): move the last two lines to **kwargs,
# since they're not commonly used as positional args.
def __init__(self,
n=1,
m=1,
dt=None,
empty=False,
shape=None,
empty=False,
layout=None,
needs_grad=False,
keep_raw=False,
rows=None,
cols=None):
# TODO: refactor: use multiple initializers like `ti.Matrix.cols([a, b, c])`
# and `ti.Matrix.empty(n, m)` instead of ad-hoc `ti.Matrix(cols=[a, b, c])`.
self.grad = None
if rows is not None or cols is not None:
if rows is not None and cols is not None:
@@ -145,7 +149,7 @@ def assign(self, other):
self.entries[i].assign(other.entries[i])

def element_wise_binary(self, foo, other):
ret = Matrix(self.n, self.m)
ret = self.empty_copy()
if isinstance(other, Matrix):
assert self.m == other.m and self.n == other.n
for i in range(self.n * self.m):
@@ -157,7 +161,7 @@ def element_wise_binary(self, foo, other):
return ret

def element_wise_unary(self, foo):
ret = Matrix(self.n, self.m)
ret = self.empty_copy()
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i])
return ret
@@ -172,8 +176,9 @@ def __matmul__(self, other):
ret(i, j).assign(ret(i, j) + self(i, k) * other(k, j))
return ret

# TODO
def broadcast(self, scalar):
ret = Matrix(self.n, self.m, empty=True)
ret = self.empty_copy()
for i in range(self.n * self.m):
ret.entries[i] = scalar
return ret
@@ -213,7 +218,7 @@ def place(self, snode):

def subscript(self, *indices):
if self.is_global():
ret = Matrix(self.n, self.m, empty=True)
ret = self.empty_copy()
for i, e in enumerate(self.entries):
ret.entries[i] = impl.subscript(e, *indices)
return ret
@@ -258,8 +263,14 @@ def __setitem__(self, index, item):
for j in range(self.m):
self(i, j)[index] = item[i][j]

def empty_copy(self):
return Matrix(self.n, self.m, empty=True)

def zeros_copy(self):
return Matrix(self.n, self.m)

def copy(self):
ret = Matrix(self.n, self.m)
ret = self.empty_copy()
ret.entries = copy.copy(self.entries)
return ret

@@ -282,20 +293,14 @@ def cast(self, dt):
ret.entries[i] = impl.cast(ret.entries[i], dt)
return ret

def abs(self):
ret = self.copy()
for i in range(len(self.entries)):
ret.entries[i] = impl.abs(ret.entries[i])
return ret

def trace(self):
assert self.n == self.m
sum = expr.Expr(self(0, 0))
for i in range(1, self.n):
sum = sum + self(i, i)
return sum

def inverse(self):
def inversed(self):
assert self.n == self.m, 'Only square matrices are invertible'
if self.n == 1:
return Matrix([1 / self(0, 0)])
@@ -346,32 +351,12 @@ def E(x, y):
raise Exception(
"Inversions of matrices with sizes >= 5 are not supported")

def inversed(self):
return self.inverse()

@staticmethod
def normalized(a, eps=0):
assert a.m == 1
invlen = 1.0 / (Matrix.norm(a) + eps)
return invlen * a

@staticmethod
def floor(a):
b = Matrix(a.n, a.m)
for i in range(len(a.entries)):
b.entries[i] = impl.floor(a.entries[i])
return b

@staticmethod
def outer_product(a, b):
assert a.m == 1
assert b.m == 1
c = Matrix(a.n, b.n)
for i in range(a.n):
for j in range(b.n):
c(i, j).assign(a(i) * b(j))
return c

@staticmethod
def transposed(a):
ret = Matrix(a.m, a.n, empty=True)
@@ -413,16 +398,6 @@ def E(x, y):
raise Exception(
"Determinants of matrices with sizes >= 5 are not supported")

@staticmethod
def cross(a, b):
assert a.n == 3 and a.m == 1
assert b.n == 3 and b.m == 1
return Matrix([
a(1) * b(2) - a(2) * b(1),
a(2) * b(0) - a(0) * b(2),
a(0) * b(1) - a(1) * b(0),
])

@staticmethod
def diag(dim, val):
ret = Matrix(dim, dim)
@@ -451,7 +426,7 @@ def atomic_add(self, other):
self.entries[i].atomic_add(other.entries[i])

def make_grad(self):
ret = Matrix(self.n, self.m, empty=True)
ret = self.empty_copy()
for i in range(len(ret.entries)):
ret.entries[i] = self.entries[i].grad
return ret
@@ -482,10 +457,6 @@ def min(self):
ret = impl.min(ret, self.entries[i])
return ret

def dot(self, other):
assert self.m == 1 and other.m == 1
return (self.transposed(self) @ other).subscript(0, 0)

def fill(self, val):
if isinstance(val, numbers.Number):
val = tuple(
@@ -507,12 +478,10 @@ def fill(self, val):
from .meta import fill_matrix
fill_matrix(self, val)

def to_numpy(self, as_vector=False):
if as_vector:
assert self.m == 1, "This matrix is not a vector"
dim_ext = (self.n, )
else:
dim_ext = (self.n, self.m)
def to_numpy(self, keep_dims=False):
# Discussion: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858
as_vector = self.m == 1 and not keep_dims
dim_ext = (self.n, ) if as_vector else (self.n, self.m)
ret = np.empty(self.loop_range().shape() + dim_ext,
dtype=to_numpy_type(
self.loop_range().snode().data_type()))
@@ -522,13 +491,10 @@ def to_numpy(self, as_vector=False):
ti.sync()
return ret

def to_torch(self, as_vector=False, device=None):
def to_torch(self, device=None, keep_dims=False):
import torch
if as_vector:
assert self.m == 1, "This matrix is not a vector"
dim_ext = (self.n, )
else:
dim_ext = (self.n, self.m)
as_vector = self.m == 1 and not keep_dims
dim_ext = (self.n, ) if as_vector else (self.n, self.m)
Review thread on this line:

yuanming-hu (Member, May 25, 2020):
Let's skip self.m as well if its value is 1, so that len(dim_ext) is:
  • 0 for 1x1 matrices
  • 1 for nx1 or 1xn matrices (n != 1)
  • 2 for nxm matrices (n, m != 1)

archibate (Collaborator, Author):
Then I should change matrix_to_ext_arr to treat the useless 1x1 matrices?

yuanming-hu (Member):
Yes, it seems that we need a keep_dims parameter in matrix_to_ext_arr as well. Thanks!

archibate (Collaborator, Author):
Special-treating 1x1 matrices sounds unreasonable to me..

yuanming-hu (Member):
Few people would use 1x1 matrices anyway. Let's just make the behavior consistent: dimensionality of size 1 should be skipped.

archibate (Collaborator, Author):
What do you mean? Do we consider a 1x1 matrix as a 1D vector?

yuanming-hu (Member, May 28, 2020):
I mean, if the matrix is 1x1, then the numpy array, when keep_dims=False, should have the same shape as the tensor, instead of the tensor shape extended by (1, 1).
ret = torch.empty(self.loop_range().shape() + dim_ext,
dtype=to_pytorch_type(
self.loop_range().snode().data_type()),
@@ -539,31 +505,32 @@ def to_torch(self, as_vector=False, device=None):
ti.sync()
return ret

def from_numpy(self, ndarray):
if len(ndarray.shape) == self.loop_range().dim() + 1:
as_vector = True
assert self.m == 1, "This matrix is not a vector"
else:
as_vector = False
assert len(ndarray.shape) == self.loop_range().dim() + 2
def from_numpy(self, ndarray, keep_dims=False):
as_vector = self.m == 1 and not keep_dims
dim_ext = 1 if as_vector else 2
assert len(ndarray.shape) == self.loop_range().dim() + dim_ext
from .meta import ext_arr_to_matrix
ext_arr_to_matrix(ndarray, self, as_vector)
import taichi as ti
ti.sync()

def from_torch(self, torch_tensor):
return self.from_numpy(torch_tensor.contiguous())
def from_torch(self, torch_tensor, keep_dims=False):
return self.from_numpy(torch_tensor.contiguous(), keep_dims)

def __ti_repr__(self):
yield '['
if self.m != 1:
yield '['

for i in range(self.n):
if i: yield ', '
yield '['
for j in range(self.m):
if j: yield ', '
yield self(i, j)
yield ']'
yield ']'

if self.m != 1:
yield ']'

@staticmethod
def zero(dt, n, m=1):
@@ -596,3 +563,32 @@ def rotation2d(alpha):
import taichi as ti
return ti.Matrix([[ti.cos(alpha), -ti.sin(alpha)],
[ti.sin(alpha), ti.cos(alpha)]])

def dot(self, other):
assert self.m == 1
assert other.m == 1
return (self.transposed(self) @ other).subscript(0, 0)

@staticmethod
def cross(a, b):
assert a.m == 1 and a.n == 3
assert b.m == 1 and b.n == 3
return Vector([
a(1) * b(2) - a(2) * b(1),
a(2) * b(0) - a(0) * b(2),
a(0) * b(1) - a(1) * b(0),
])

@staticmethod
def outer_product(a, b):
assert a.m == 1
assert b.m == 1
c = Matrix(a.n, b.n)
for i in range(a.n):
for j in range(b.n):
c(i, j).assign(a(i) * b(j))
return c


def Vector(n=1, dt=None, shape=None, **kwargs):
return Matrix(n, 1, dt, shape, **kwargs)
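With `Vector` now a thin factory over `Matrix` (last hunk above), vector declarations are unchanged at call sites; a hedged sketch (shapes are illustrative):

```python
import taichi as ti

ti.init()

x = ti.Vector(3, dt=ti.f32, shape=16)     # forwards to ti.Matrix(3, 1, ...)
y = ti.Matrix(3, 1, dt=ti.f32, shape=16)  # explicit equivalent

assert x.to_numpy().shape == (16, 3)                   # m == 1 dropped by default
assert y.to_numpy(keep_dims=True).shape == (16, 3, 1)  # old layout on request
```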
4 changes: 2 additions & 2 deletions tests/python/test_fill.py
@@ -25,7 +25,7 @@ def values():

@ti.all_archs
def test_fill_matrix_scalar():
val = ti.Vector(2, 3, ti.i32)
val = ti.Matrix(2, 3, ti.i32)

n = 4
m = 7
@@ -51,7 +51,7 @@ def values():

@ti.all_archs
def test_fill_matrix_matrix():
val = ti.Vector(2, 3, ti.i32)
val = ti.Matrix(2, 3, ti.i32)

n = 4
m = 7
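The test change follows from `Vector`'s new signature: `Vector(n=1, dt=None, shape=None)` has no `m` parameter, so a 2x3 field can no longer be declared through it. A sketch of the corrected declaration (shape values taken from the test):

```python
import taichi as ti

ti.init()

# Pre-PR, ti.Vector was a bare alias of ti.Matrix, so ti.Vector(2, 3, ti.i32)
# declared a 2x3 matrix. Post-PR that call would bind 3 to dt, so the tests
# declare the 2x3 field explicitly with ti.Matrix:
val = ti.Matrix(2, 3, dt=ti.i32, shape=(4, 7))
```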