diff --git a/docs/external.rst b/docs/external.rst
index f457f85753177..64633a63f9184 100644
--- a/docs/external.rst
+++ b/docs/external.rst
@@ -41,11 +41,11 @@ Use ``to_numpy``/``from_numpy``/``to_torch``/``from_torch``:
     vec.from_numpy(arr)
 
     arr = vec.to_numpy()
-    assert arr.shape == (n, m, 3, 1)
-
-    arr = vec.to_numpy(as_vector=True)
     assert arr.shape == (n, m, 3)
 
+    arr = vec.to_numpy(keep_dims=True)
+    assert arr.shape == (n, m, 3, 1)
+
     # Matrix
     arr = np.ones(shape=(n, m, 3, 4), dtype=np.int32)
@@ -55,6 +55,9 @@ Use ``to_numpy``/``from_numpy``/``to_torch``/``from_torch``:
     assert arr.shape == (n, m, 3, 4)
 
+TODO: add API reference
+
+
 Using external arrays as Taichi kernel parameters
 -------------------------------------------------
diff --git a/examples/cornell_box.py b/examples/cornell_box.py
index fbe44fd4a5822..1c1718c246a8b 100644
--- a/examples/cornell_box.py
+++ b/examples/cornell_box.py
@@ -450,7 +450,7 @@ def render():
     render()
     interval = 10
     if i % interval == 0 and i > 0:
-        img = color_buffer.to_numpy(as_vector=True) * (1 / (i + 1))
+        img = color_buffer.to_numpy() * (1 / (i + 1))
         img = np.sqrt(img / img.mean() * 0.24)
         print("{:.2f} samples/s ({} iters, var={})".format(
             interval / (time.time() - last_t), i, np.var(img)))
diff --git a/examples/mpm88.py b/examples/mpm88.py
index 5ac064a00c225..28b0307635a01 100644
--- a/examples/mpm88.py
+++ b/examples/mpm88.py
@@ -88,6 +88,6 @@ def substep():
         substep()
 
     gui.clear(0x112F41)
-    pos = x.to_numpy(as_vector=True)
+    pos = x.to_numpy()
     gui.circles(pos, radius=1.5, color=0x068587)
     gui.show()
diff --git a/examples/quadtree.py b/examples/quadtree.py
index ce617e8f0831b..e1c481fda38b3 100644
--- a/examples/quadtree.py
+++ b/examples/quadtree.py
@@ -61,5 +61,5 @@ def vec2_npf32(m):
         pos = gui.get_cursor_pos()
         action(vec2_npf32(pos))
     paint()
-    gui.set_image(img.to_numpy(as_vector=True))
+    gui.set_image(img.to_numpy())
     gui.show()
diff --git a/examples/sdf_renderer.py b/examples/sdf_renderer.py
index ace3495c5fd78..bc9c6502edb3b 100644
--- a/examples/sdf_renderer.py
+++ b/examples/sdf_renderer.py
@@ -157,7 +157,7 @@ def render():
     if i % interval == 0 and i > 0:
         print("{:.2f} samples/s".format(interval / (time.time() - last_t)))
         last_t = time.time()
-        img = color_buffer.to_numpy(as_vector=True) * (1 / (i + 1))
+        img = color_buffer.to_numpy() * (1 / (i + 1))
         img = img / img.mean() * 0.24
         gui.set_image(np.sqrt(img))
         gui.show()
diff --git a/examples/stable_fluid.py b/examples/stable_fluid.py
index 6199cdc1abbe8..77181968ff05d 100644
--- a/examples/stable_fluid.py
+++ b/examples/stable_fluid.py
@@ -260,7 +260,7 @@ def main():
         mouse_data = md_gen(gui)
         step(mouse_data)
 
-        img = color_buffer.to_numpy(as_vector=True)
+        img = color_buffer.to_numpy()
         gui.set_image(img)
         gui.show()
diff --git a/python/taichi/lang/__init__.py b/python/taichi/lang/__init__.py
index c9db4fa2c0521..e972f855b286b 100644
--- a/python/taichi/lang/__init__.py
+++ b/python/taichi/lang/__init__.py
@@ -1,5 +1,5 @@
 from .impl import *
-from .matrix import Matrix
+from .matrix import Matrix, Vector
 from .transformer import TaichiSyntaxError
 from .ndrange import ndrange, GroupedNDRange
 from copy import deepcopy as _deepcopy
@@ -16,7 +16,6 @@
 ij = indices(0, 1)
 ijk = indices(0, 1, 2)
 ijkl = indices(0, 1, 2, 3)
-Vector = Matrix
 
 outer_product = Matrix.outer_product
 cross = Matrix.cross
@@ -180,11 +179,7 @@ def cache_l1(v):
 block_dim = core.block_dim
 cache = core.cache
 
-
-def inversed(x):
-    return x.inversed()
-
-
+inversed = Matrix.inversed
 transposed = Matrix.transposed
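Note on the example changes above: they all rely on the new default in this PR, where ``to_numpy``/``to_torch`` squeeze the trailing ``m == 1`` axis of a vector field instead of requiring ``as_vector=True``. A minimal sketch of the new shapes (the field ``vec`` and sizes ``n``, ``m`` are hypothetical)::

    import taichi as ti

    ti.init()
    n, m = 4, 7
    vec = ti.Vector(3, ti.f32, shape=(n, m))

    arr = vec.to_numpy()                # vector fields now squeeze the m == 1 axis
    assert arr.shape == (n, m, 3)       # previously required to_numpy(as_vector=True)

    arr = vec.to_numpy(keep_dims=True)  # opt back into the old 4-D layout
    assert arr.shape == (n, m, 3, 1)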
diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py
index 6db47f9aac901..7caa934c1d5e3 100644
--- a/python/taichi/lang/matrix.py
+++ b/python/taichi/lang/matrix.py
@@ -20,17 +20,21 @@ def broadcasted(self, other, *args, **kwargs):
 class Matrix(TaichiOperations):
     is_taichi_class = True
 
+    # TODO(archibate): move the last two lines to **kwargs,
+    # since they're not commonly used as positional args.
     def __init__(self,
                  n=1,
                  m=1,
                  dt=None,
-                 empty=False,
                  shape=None,
+                 empty=False,
                  layout=None,
                  needs_grad=False,
                  keep_raw=False,
                  rows=None,
                  cols=None):
+        # TODO: refactor to use multiple initializers like `ti.Matrix.cols([a, b, c])`
+        # and `ti.Matrix.empty(n, m)` instead of the ad-hoc `ti.Matrix(cols=[a, b, c])`.
         self.grad = None
         if rows is not None or cols is not None:
             if rows is not None and cols is not None:
@@ -145,7 +149,7 @@ def assign(self, other):
             self.entries[i].assign(other.entries[i])
 
     def element_wise_binary(self, foo, other):
-        ret = Matrix(self.n, self.m)
+        ret = self.empty_copy()
         if isinstance(other, Matrix):
             assert self.m == other.m and self.n == other.n
             for i in range(self.n * self.m):
@@ -157,7 +161,7 @@ def element_wise_binary(self, foo, other):
         return ret
 
     def element_wise_unary(self, foo):
-        ret = Matrix(self.n, self.m)
+        ret = self.empty_copy()
         for i in range(self.n * self.m):
             ret.entries[i] = foo(self.entries[i])
         return ret
@@ -172,8 +176,9 @@ def __matmul__(self, other):
                     ret(i, j).assign(ret(i, j) + self(i, k) * other(k, j))
         return ret
 
+    # TODO
     def broadcast(self, scalar):
-        ret = Matrix(self.n, self.m, empty=True)
+        ret = self.empty_copy()
         for i in range(self.n * self.m):
             ret.entries[i] = scalar
         return ret
@@ -213,7 +218,7 @@ def place(self, snode):
 
     def subscript(self, *indices):
         if self.is_global():
-            ret = Matrix(self.n, self.m, empty=True)
+            ret = self.empty_copy()
             for i, e in enumerate(self.entries):
                 ret.entries[i] = impl.subscript(e, *indices)
             return ret
@@ -258,8 +263,14 @@ def __setitem__(self, index, item):
             for j in range(self.m):
                 self(i, j)[index] = item[i][j]
 
+    def empty_copy(self):
+        return Matrix(self.n, self.m, empty=True)
+
+    def zeros_copy(self):
+        return Matrix(self.n, self.m)
+
     def copy(self):
-        ret = Matrix(self.n, self.m)
+        ret = self.empty_copy()
         ret.entries = copy.copy(self.entries)
         return ret
@@ -282,12 +293,6 @@ def cast(self, dt):
             ret.entries[i] = impl.cast(ret.entries[i], dt)
         return ret
 
-    def abs(self):
-        ret = self.copy()
-        for i in range(len(self.entries)):
-            ret.entries[i] = impl.abs(ret.entries[i])
-        return ret
-
     def trace(self):
         assert self.n == self.m
         sum = expr.Expr(self(0, 0))
@@ -295,7 +300,7 @@ def trace(self):
             sum = sum + self(i, i)
         return sum
 
-    def inverse(self):
+    def inversed(self):
         assert self.n == self.m, 'Only square matrices are invertible'
         if self.n == 1:
             return Matrix([1 / self(0, 0)])
@@ -346,32 +351,12 @@ def E(x, y):
             raise Exception(
                 "Inversions of matrices with sizes >= 5 are not supported")
 
-    def inversed(self):
-        return self.inverse()
-
     @staticmethod
     def normalized(a, eps=0):
         assert a.m == 1
         invlen = 1.0 / (Matrix.norm(a) + eps)
         return invlen * a
 
-    @staticmethod
-    def floor(a):
-        b = Matrix(a.n, a.m)
-        for i in range(len(a.entries)):
-            b.entries[i] = impl.floor(a.entries[i])
-        return b
-
-    @staticmethod
-    def outer_product(a, b):
-        assert a.m == 1
-        assert b.m == 1
-        c = Matrix(a.n, b.n)
-        for i in range(a.n):
-            for j in range(b.n):
-                c(i, j).assign(a(i) * b(j))
-        return c
-
     @staticmethod
     def transposed(a):
         ret = Matrix(a.m, a.n, empty=True)
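Note on the ``inverse`` -> ``inversed`` rename above: it matches ``transposed``/``normalized``, and the old ``inversed``-calls-``inverse`` indirection is dropped. A rough migration sketch (the field name ``A`` and its shape are hypothetical)::

    import taichi as ti

    ti.init()
    A = ti.Matrix(2, 2, dt=ti.f32, shape=4)

    @ti.kernel
    def invert_all():
        for i in A:
            # was A[i].inverse(); the ti.inversed(A[i]) alias also works now
            A[i] = A[i].inversed()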
@@ -413,16 +398,6 @@ def E(x, y):
             raise Exception(
                 "Determinants of matrices with sizes >= 5 are not supported")
 
-    @staticmethod
-    def cross(a, b):
-        assert a.n == 3 and a.m == 1
-        assert b.n == 3 and b.m == 1
-        return Matrix([
-            a(1) * b(2) - a(2) * b(1),
-            a(2) * b(0) - a(0) * b(2),
-            a(0) * b(1) - a(1) * b(0),
-        ])
-
     @staticmethod
     def diag(dim, val):
         ret = Matrix(dim, dim)
@@ -451,7 +426,7 @@ def atomic_add(self, other):
             self.entries[i].atomic_add(other.entries[i])
 
     def make_grad(self):
-        ret = Matrix(self.n, self.m, empty=True)
+        ret = self.empty_copy()
         for i in range(len(ret.entries)):
             ret.entries[i] = self.entries[i].grad
         return ret
@@ -496,10 +471,6 @@ def all(self):
             ret = ret + (self.entries[i] != ti.expr_init(0))
         return -(ret == ti.expr_init(-len(self.entries)))
 
-    def dot(self, other):
-        assert self.m == 1 and other.m == 1
-        return (self.transposed(self) @ other).subscript(0, 0)
-
     def fill(self, val):
         if isinstance(val, numbers.Number):
             val = tuple(
@@ -521,12 +492,10 @@ def fill(self, val):
         from .meta import fill_matrix
         fill_matrix(self, val)
 
-    def to_numpy(self, as_vector=False):
-        if as_vector:
-            assert self.m == 1, "This matrix is not a vector"
-            dim_ext = (self.n, )
-        else:
-            dim_ext = (self.n, self.m)
+    def to_numpy(self, keep_dims=False):
+        # Discussion: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858
+        as_vector = self.m == 1 and not keep_dims
+        dim_ext = (self.n, ) if as_vector else (self.n, self.m)
         ret = np.empty(self.loop_range().shape() + dim_ext,
                        dtype=to_numpy_type(
                            self.loop_range().snode().data_type()))
@@ -536,13 +505,10 @@
         ti.sync()
         return ret
 
-    def to_torch(self, as_vector=False, device=None):
+    def to_torch(self, device=None, keep_dims=False):
         import torch
-        if as_vector:
-            assert self.m == 1, "This matrix is not a vector"
-            dim_ext = (self.n, )
-        else:
-            dim_ext = (self.n, self.m)
+        as_vector = self.m == 1 and not keep_dims
+        dim_ext = (self.n, ) if as_vector else (self.n, self.m)
         ret = torch.empty(self.loop_range().shape() + dim_ext,
                           dtype=to_pytorch_type(
                               self.loop_range().snode().data_type()),
@@ -560,6 +526,8 @@
         else:
             as_vector = False
             assert len(ndarray.shape) == self.loop_range().dim() + 2
+        dim_ext = 1 if as_vector else 2
+        assert len(ndarray.shape) == self.loop_range().dim() + dim_ext
         from .meta import ext_arr_to_matrix
         ext_arr_to_matrix(ndarray, self, as_vector)
         import taichi as ti
@@ -569,15 +537,19 @@ def from_torch(self, torch_tensor):
         return self.from_numpy(torch_tensor.contiguous())
 
     def __ti_repr__(self):
-        yield '['
-        for i in range(self.n):
-            if i: yield ', '
+        if self.m != 1:
             yield '['
-            for j in range(self.m):
-                if j: yield ', '
+
+        for j in range(self.m):
+            if j: yield ', '
+            yield '['
+            for i in range(self.n):
+                if i: yield ', '
                 yield self(i, j)
             yield ']'
-        yield ']'
+
+        if self.m != 1:
+            yield ']'
 
     @staticmethod
     def zero(dt, n, m=1):
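Note on the ``__ti_repr__`` rework above: vectors (``m == 1``) now print as one flat list, and only true matrices keep the nested brackets (emitted column by column). A hedged sketch of the intended output, assuming kernel-scope ``print`` of a matrix goes through ``__ti_repr__``::

    import taichi as ti

    ti.init()

    @ti.kernel
    def show():
        v = ti.Vector([1, 2, 3])
        print(v)  # expected: [1, 2, 3], no longer [[1], [2], [3]]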
@@ -616,3 +588,40 @@ def __hash__(self):
         # If not, we get `unhashable type: Matrix` when
         # using matrices as template arguments.
         return id(self)
+
+    def dot(self, other):
+        assert self.m == 1
+        assert other.m == 1
+        return (self.transposed(self) @ other).subscript(0, 0)
+
+    @staticmethod
+    def cross(a, b):
+        assert a.m == 1 and a.n == 3
+        assert b.m == 1 and b.n == 3
+        return Vector([
+            a(1) * b(2) - a(2) * b(1),
+            a(2) * b(0) - a(0) * b(2),
+            a(0) * b(1) - a(1) * b(0),
+        ])
+
+    @staticmethod
+    def outer_product(a, b):
+        assert a.m == 1
+        assert b.m == 1
+        c = Matrix(a.n, b.n)
+        for i in range(a.n):
+            for j in range(b.n):
+                c(i, j).assign(a(i) * b(j))
+        return c
+
+
+def Vector(n=1, dt=None, shape=None, **kwargs):
+    return Matrix(n, 1, dt, shape, **kwargs)
+
+
+Vector.zero = Matrix.zero
+Vector.one = Matrix.one
+Vector.dot = Matrix.dot
+Vector.cross = Matrix.cross
+Vector.outer_product = Matrix.outer_product
+Vector.unit = Matrix.unit
diff --git a/tests/python/test_fill.py b/tests/python/test_fill.py
index 1f42957cae829..9dc1807816b8c 100644
--- a/tests/python/test_fill.py
+++ b/tests/python/test_fill.py
@@ -25,7 +25,7 @@ def values():
 
 @ti.all_archs
 def test_fill_matrix_scalar():
-    val = ti.Vector(2, 3, ti.i32)
+    val = ti.Matrix(2, 3, ti.i32)
     n = 4
     m = 7
@@ -51,7 +51,7 @@ def values():
 
 @ti.all_archs
 def test_fill_matrix_matrix():
-    val = ti.Vector(2, 3, ti.i32)
+    val = ti.Matrix(2, 3, ti.i32)
     n = 4
     m = 7
diff --git a/tests/python/test_linalg.py b/tests/python/test_linalg.py
index 5ca046208b3e8..94bbdc7152eab 100644
--- a/tests/python/test_linalg.py
+++ b/tests/python/test_linalg.py
@@ -120,7 +120,7 @@ def invert():
 
     invert()
 
-    m_np = m.to_numpy()
+    m_np = m.to_numpy(keep_dims=True)
     np.testing.assert_almost_equal(m_np, np.linalg.inv(M))
diff --git a/tests/python/test_mpm88.py b/tests/python/test_mpm88.py
index 2b9433543674d..122d80b6a1adc 100644
--- a/tests/python/test_mpm88.py
+++ b/tests/python/test_mpm88.py
@@ -89,7 +89,7 @@ def substep():
         grid_m.fill(0)
         substep()
 
-    pos = x.to_numpy(as_vector=True)
+    pos = x.to_numpy()
     pos[:, 1] *= 2
     regression = [
         0.31722742,
diff --git a/tests/python/test_numpy_io.py b/tests/python/test_numpy_io.py
index de57deac6eb72..59b5326deacf8 100644
--- a/tests/python/test_numpy_io.py
+++ b/tests/python/test_numpy_io.py
@@ -132,19 +132,27 @@ def test_numpy_io_example():
     # Vector
     arr = np.ones(shape=(n, m, 3), dtype=np.int32)
     vec.from_numpy(arr)
+
     arr = np.ones(shape=(n, m, 3, 1), dtype=np.int32)
     vec.from_numpy(arr)
 
-    arr = vec.to_numpy()
-    assert arr.shape == (n, m, 3, 1)
+    arr = np.ones(shape=(n, m, 1, 3), dtype=np.int32)
+    vec.from_numpy(arr)
 
-    arr = vec.to_numpy(as_vector=True)
+    arr = vec.to_numpy()
     assert arr.shape == (n, m, 3)
 
+    arr = vec.to_numpy(keep_dims=True)
+    assert arr.shape == (n, m, 3, 1)
+
     # Matrix
     arr = np.ones(shape=(n, m, 3, 4), dtype=np.int32)
     mat.from_numpy(arr)
+
     arr = mat.to_numpy()
     assert arr.shape == (n, m, 3, 4)
 
+    arr = mat.to_numpy(keep_dims=True)
+    assert arr.shape == (n, m, 3, 4)
+
     # For PyTorch tensors, use to_torch/from_torch instead
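Note: with ``Vector`` now a thin wrapper that forwards to ``Matrix(n, 1, ...)``, the ``test_fill`` cases above switch to ``ti.Matrix`` because they declare genuinely 2 x 3 values. A small sketch of the intended split (all names are illustrative)::

    import taichi as ti

    ti.init()
    v = ti.Vector(3, ti.f32, shape=(4, 4))   # sugar for ti.Matrix(3, 1, ti.f32, shape=(4, 4))
    M = ti.Matrix(2, 3, ti.i32, shape=(4,))  # a real 2 x 3 matrix field

    @ti.kernel
    def demo():
        a = ti.Vector([1.0, 0.0, 0.0])
        b = ti.Vector([0.0, 1.0, 0.0])
        c = ti.cross(a, b)  # module-level alias for the Matrix.cross staticmethod
        d = a.dot(b)        # dot is an instance method again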
diff --git a/tests/python/test_torch_io.py b/tests/python/test_torch_io.py
index b07f64992a09c..a0716f827b2f6 100644
--- a/tests/python/test_torch_io.py
+++ b/tests/python/test_torch_io.py
@@ -210,16 +210,16 @@ def test_shape_matrix():
 @ti.torch_test
 def test_shape_vector():
     n = 12
-    x = ti.Matrix(3, 1, ti.f32, shape=(n, n))
-    X = x.to_torch(as_vector=True)
+    x = ti.Vector(3, ti.f32, shape=(n, n))
+    X = x.to_torch()
     for i in range(n):
         for j in range(n):
             for k in range(3):
                 X[i, j, k] = i * 10 + j + k * 100
 
     x.from_torch(X)
-    X1 = x.to_torch(as_vector=True)
+    X1 = x.to_torch()
     x.from_torch(X1)
-    X1 = x.to_torch(as_vector=True)
+    X1 = x.to_torch()
 
     assert (X == X1).all()
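Note: ``to_torch`` mirrors the ``to_numpy`` behavior, and ``from_torch`` accepts both layouts since it defers to ``from_numpy``. A hedged round-trip sketch (requires PyTorch; sizes are illustrative)::

    import taichi as ti

    ti.init()
    n = 12
    x = ti.Vector(3, ti.f32, shape=(n, n))

    X = x.to_torch()                 # torch.Size([12, 12, 3])
    x.from_torch(X)                  # the squeezed layout round-trips

    Xk = x.to_torch(keep_dims=True)  # torch.Size([12, 12, 3, 1])
    x.from_torch(Xk)                 # and so does the explicit 4-D layout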