From 6b210573ca0e81bfab8e66cdb5958f56a6b10d93 Mon Sep 17 00:00:00 2001 From: Paddle CI_MAC Date: Thu, 2 Sep 2021 14:32:24 +0800 Subject: [PATCH] mirgate_35175 --- .../fluid/tests/unittests/test_var_base.py | 376 +++++++++++++++++- python/paddle/tensor/to_string.py | 77 ++-- 2 files changed, 425 insertions(+), 28 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index 76c871f372..c94316c748 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -65,16 +65,24 @@ class TestVarBase(unittest.TestCase): y = clone_x**2 y.backward() self.assertTrue( - np.array_equal(x.grad, np.array([2.4]).astype('float32'))) + np.array_equal(x.grad.numpy(), + np.array([2.4]).astype('float32'))) y = x.cpu() self.assertEqual(y.place.__repr__(), "CPUPlace") if core.is_compiled_with_cuda(): y = x.pin_memory() self.assertEqual(y.place.__repr__(), "CUDAPinnedPlace") + y = x.cuda() + y = x.cuda(None) + self.assertEqual(y.place.__repr__(), "CUDAPlace(0)") + y = x.cuda(device_id=0) + self.assertEqual(y.place.__repr__(), "CUDAPlace(0)") y = x.cuda(blocking=False) self.assertEqual(y.place.__repr__(), "CUDAPlace(0)") y = x.cuda(blocking=True) self.assertEqual(y.place.__repr__(), "CUDAPlace(0)") + with self.assertRaises(ValueError): + y = x.cuda("test") # support 'dtype' is core.VarType x = paddle.rand((2, 2)) @@ -142,6 +150,91 @@ class TestVarBase(unittest.TestCase): self.assertEqual(y.dtype, core.VarDesc.VarType.COMPLEX64) self.assertEqual(y.shape, [2]) + paddle.set_default_dtype('float32') + x = paddle.randn([3, 4]) + x_array = np.array(x) + self.assertEqual(x_array.shape, x.numpy().shape) + self.assertEqual(x_array.dtype, x.numpy().dtype) + self.assertTrue(np.array_equal(x_array, x.numpy())) + + x = paddle.to_tensor(1.0) + self.assertEqual(x.item(), 1.0) + self.assertTrue(isinstance(x.item(), float)) + + x = paddle.randn([3, 2, 2]) + self.assertTrue(isinstance(x.item(5), float)) + self.assertTrue(isinstance(x.item(1, 0, 1), float)) + self.assertEqual(x.item(5), x.item(1, 0, 1)) + self.assertTrue( + np.array_equal(x.item(1, 0, 1), x.numpy().item(1, 0, 1))) + + x = paddle.to_tensor([[1.111111, 2.222222, 3.333333]]) + self.assertEqual(x.item(0, 2), x.item(2)) + self.assertAlmostEqual(x.item(2), 3.333333) + self.assertTrue(isinstance(x.item(0, 2), float)) + + x = paddle.to_tensor(1.0, dtype='float64') + self.assertEqual(x.item(), 1.0) + self.assertTrue(isinstance(x.item(), float)) + + x = paddle.to_tensor(1.0, dtype='float16') + self.assertEqual(x.item(), 1.0) + self.assertTrue(isinstance(x.item(), float)) + + x = paddle.to_tensor(1, dtype='uint8') + self.assertEqual(x.item(), 1) + self.assertTrue(isinstance(x.item(), int)) + + x = paddle.to_tensor(1, dtype='int8') + self.assertEqual(x.item(), 1) + self.assertTrue(isinstance(x.item(), int)) + + x = paddle.to_tensor(1, dtype='int16') + self.assertEqual(x.item(), 1) + self.assertTrue(isinstance(x.item(), int)) + + x = paddle.to_tensor(1, dtype='int32') + self.assertEqual(x.item(), 1) + self.assertTrue(isinstance(x.item(), int)) + + x = paddle.to_tensor(1, dtype='int64') + self.assertEqual(x.item(), 1) + self.assertTrue(isinstance(x.item(), int)) + + x = paddle.to_tensor(True) + self.assertEqual(x.item(), True) + self.assertTrue(isinstance(x.item(), bool)) + + x = paddle.to_tensor(1 + 1j) + self.assertEqual(x.item(), 1 + 1j) + self.assertTrue(isinstance(x.item(), complex)) + + numpy_array = np.random.randn(3, 4) + # covert 
core.LoDTensor to paddle.Tensor + lod_tensor = paddle.fluid.core.LoDTensor() + place = paddle.fluid.framework._current_expected_place() + lod_tensor.set(numpy_array, place) + x = paddle.to_tensor(lod_tensor) + self.assertTrue(np.array_equal(x.numpy(), numpy_array)) + self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) + self.assertEqual(str(x.place), str(place)) + + # covert core.Tensor to paddle.Tensor + x = paddle.to_tensor(numpy_array) + dlpack = x.value().get_tensor()._to_dlpack() + tensor_from_dlpack = paddle.fluid.core.from_dlpack(dlpack) + x = paddle.to_tensor(tensor_from_dlpack) + self.assertTrue(np.array_equal(x.numpy(), numpy_array)) + self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) + + with self.assertRaises(ValueError): + paddle.randn([3, 2, 2]).item() + with self.assertRaises(ValueError): + paddle.randn([3, 2, 2]).item(18) + with self.assertRaises(ValueError): + paddle.randn([3, 2, 2]).item(1, 2) + with self.assertRaises(ValueError): + paddle.randn([3, 2, 2]).item(2, 1, 2) with self.assertRaises(TypeError): paddle.to_tensor('test') with self.assertRaises(TypeError): @@ -160,6 +253,17 @@ class TestVarBase(unittest.TestCase): _test_place("gpu_pinned") _test_place(core.CUDAPlace(0)) _test_place("gpu:0") + if core.is_compiled_with_npu(): + _test_place(core.NPUPlace(0)) + _test_place("npu:0") + + def test_to_tensor_not_change_input_stop_gradient(self): + with paddle.fluid.dygraph.guard(core.CPUPlace()): + a = paddle.zeros([1024]) + a.stop_gradient = False + b = paddle.to_tensor(a) + self.assertEqual(a.stop_gradient, False) + self.assertEqual(b.stop_gradient, True) def test_to_tensor_change_place(self): if core.is_compiled_with_cuda(): @@ -179,6 +283,22 @@ class TestVarBase(unittest.TestCase): a = paddle.to_tensor(a, place=paddle.CUDAPinnedPlace()) self.assertEqual(a.place.__repr__(), "CUDAPinnedPlace") + def test_to_tensor_with_lodtensor(self): + if core.is_compiled_with_cuda(): + a_np = np.random.rand(1024, 1024) + with paddle.fluid.dygraph.guard(core.CPUPlace()): + lod_tensor = core.LoDTensor() + lod_tensor.set(a_np, core.CPUPlace()) + a = paddle.to_tensor(lod_tensor) + self.assertTrue(np.array_equal(a_np, a.numpy())) + + with paddle.fluid.dygraph.guard(core.CUDAPlace(0)): + lod_tensor = core.LoDTensor() + lod_tensor.set(a_np, core.CUDAPlace(0)) + a = paddle.to_tensor(lod_tensor, place=core.CPUPlace()) + self.assertTrue(np.array_equal(a_np, a.numpy())) + self.assertTrue(a.place.__repr__(), "CPUPlace") + def test_to_variable(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array, name="abc") @@ -255,19 +375,21 @@ class TestVarBase(unittest.TestCase): detach_x = x.detach() self.assertTrue(detach_x.stop_gradient, True) + cmp_float = np.allclose if core.is_compiled_with_rocm( + ) else np.array_equal detach_x[:] = 10.0 - self.assertTrue(np.array_equal(x.numpy(), [10.0])) + self.assertTrue(cmp_float(x.numpy(), [10.0])) y = x**2 y.backward() - self.assertTrue(np.array_equal(x.grad, [20.0])) + self.assertTrue(cmp_float(x.grad.numpy(), [20.0])) self.assertEqual(detach_x.grad, None) detach_x.stop_gradient = False # Set stop_gradient to be False, supported auto-grad z = 3 * detach_x**2 z.backward() - self.assertTrue(np.array_equal(x.grad, [20.0])) - self.assertTrue(np.array_equal(detach_x.grad, [60.0])) + self.assertTrue(cmp_float(x.grad.numpy(), [20.0])) + self.assertTrue(cmp_float(detach_x.grad.numpy(), [60.0])) # Due to sharing of data with origin Tensor, There are some unsafe operations: with self.assertRaises(RuntimeError): @@ -473,6 +595,183 @@ 
class TestVarBase(unittest.TestCase): np.array_equal(local_out[15], tensor_array[::-1, ::-1, ::-1])) self.assertTrue(np.array_equal(local_out[16], tensor_array[-4:4])) + def _test_slice_for_tensor_attr(self): + tensor_array = np.array( + [[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype('float32') + + var = paddle.to_tensor(tensor_array) + + one = paddle.ones(shape=[1], dtype="int32") + two = paddle.full(shape=[1], fill_value=2, dtype="int32") + negative_one = paddle.full(shape=[1], fill_value=-1, dtype="int32") + four = paddle.full(shape=[1], fill_value=4, dtype="int32") + + var = fluid.dygraph.to_variable(tensor_array) + var1 = var[0, one, one] + var2 = var[one:] + var3 = var[0:one] + var4 = var[::negative_one] + var5 = var[one, one:, one:] + var_reshape = fluid.layers.reshape(var, [3, negative_one, 3]) + var6 = var_reshape[:, :, negative_one] + var7 = var[:, :, :negative_one] + var8 = var[:one, :one, :1] + var9 = var[:-1, :negative_one, :negative_one] + var10 = var[::negative_one, :one, :negative_one] + var11 = var[:negative_one, ::-1, negative_one:] + var12 = var[one:2, 2:, ::negative_one] + var13 = var[two:10, 2:, -2:negative_one] + var14 = var[1:negative_one, 0:2, ::negative_one] + var15 = var[::negative_one, ::-1, ::negative_one] + var16 = var[-4:4] + + vars = [ + var, var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, + var11, var12, var13, var14, var15, var16 + ] + local_out = [var.numpy() for var in vars] + + self.assertTrue(np.array_equal(local_out[1], tensor_array[0, 1, 1:2])) + self.assertTrue(np.array_equal(local_out[2], tensor_array[1:])) + self.assertTrue(np.array_equal(local_out[3], tensor_array[0:1])) + self.assertTrue(np.array_equal(local_out[4], tensor_array[::-1])) + self.assertTrue(np.array_equal(local_out[5], tensor_array[1, 1:, 1:])) + self.assertTrue( + np.array_equal(local_out[6], + tensor_array.reshape((3, -1, 3))[:, :, -1])) + self.assertTrue(np.array_equal(local_out[7], tensor_array[:, :, :-1])) + self.assertTrue(np.array_equal(local_out[8], tensor_array[:1, :1, :1])) + self.assertTrue( + np.array_equal(local_out[9], tensor_array[:-1, :-1, :-1])) + self.assertTrue( + np.array_equal(local_out[10], tensor_array[::-1, :1, :-1])) + self.assertTrue( + np.array_equal(local_out[11], tensor_array[:-1, ::-1, -1:])) + self.assertTrue( + np.array_equal(local_out[12], tensor_array[1:2, 2:, ::-1])) + self.assertTrue( + np.array_equal(local_out[13], tensor_array[2:10, 2:, -2:-1])) + self.assertTrue( + np.array_equal(local_out[14], tensor_array[1:-1, 0:2, ::-1])) + self.assertTrue( + np.array_equal(local_out[15], tensor_array[::-1, ::-1, ::-1])) + self.assertTrue(np.array_equal(local_out[16], tensor_array[-4:4])) + + def _test_for_getitem_ellipsis_index(self): + shape = (64, 3, 5, 256) + np_fp32_value = np.random.random(shape).astype('float32') + np_int_value = np.random.randint(1, 100, shape) + + var_fp32 = paddle.to_tensor(np_fp32_value) + var_int = paddle.to_tensor(np_int_value) + + def assert_getitem_ellipsis_index(var_tensor, var_np): + var = [ + var_tensor[..., 0].numpy(), + var_tensor[..., 1, 0].numpy(), + var_tensor[0, ..., 1, 0].numpy(), + var_tensor[1, ..., 1].numpy(), + var_tensor[2, ...].numpy(), + var_tensor[2, 0, ...].numpy(), + var_tensor[2, 0, 1, ...].numpy(), + var_tensor[...].numpy(), + var_tensor[:, ..., 100].numpy(), + ] + + self.assertTrue(np.array_equal(var[0], var_np[..., 0])) + self.assertTrue(np.array_equal(var[1], var_np[..., 1, 0])) + 
self.assertTrue(np.array_equal(var[2], var_np[0, ..., 1, 0])) + self.assertTrue(np.array_equal(var[3], var_np[1, ..., 1])) + self.assertTrue(np.array_equal(var[4], var_np[2, ...])) + self.assertTrue(np.array_equal(var[5], var_np[2, 0, ...])) + self.assertTrue(np.array_equal(var[6], var_np[2, 0, 1, ...])) + self.assertTrue(np.array_equal(var[7], var_np[...])) + self.assertTrue(np.array_equal(var[8], var_np[:, ..., 100])) + + var_fp32 = paddle.to_tensor(np_fp32_value) + var_int = paddle.to_tensor(np_int_value) + + assert_getitem_ellipsis_index(var_fp32, np_fp32_value) + assert_getitem_ellipsis_index(var_int, np_int_value) + + def _test_none_index(self): + shape = (8, 64, 5, 256) + np_value = np.random.random(shape).astype('float32') + var_tensor = paddle.to_tensor(np_value) + + var = [ + var_tensor[1, 0, None].numpy(), + var_tensor[None, ..., 1, 0].numpy(), + var_tensor[:, :, :, None].numpy(), + var_tensor[1, ..., 1, None].numpy(), + var_tensor[2, ..., None, None].numpy(), + var_tensor[None, 2, 0, ...].numpy(), + var_tensor[None, 2, None, 1].numpy(), + var_tensor[None].numpy(), + var_tensor[0, 0, None, 0, 0, None].numpy(), + var_tensor[None, None, 0, ..., None].numpy(), + var_tensor[0, 1:10:2, None, None, ...].numpy(), + ] + + self.assertTrue(np.array_equal(var[0], np_value[1, 0, None])) + self.assertTrue(np.array_equal(var[1], np_value[None, ..., 1, 0])) + self.assertTrue(np.array_equal(var[2], np_value[:, :, :, None])) + self.assertTrue(np.array_equal(var[3], np_value[1, ..., 1, None])) + self.assertTrue(np.array_equal(var[4], np_value[2, ..., None, None])) + self.assertTrue(np.array_equal(var[5], np_value[None, 2, 0, ...])) + self.assertTrue(np.array_equal(var[6], np_value[None, 2, None, 1])) + self.assertTrue(np.array_equal(var[7], np_value[None])) + self.assertTrue( + np.array_equal(var[8], np_value[0, 0, None, 0, 0, None])) + self.assertTrue( + np.array_equal(var[9], np_value[None, None, 0, ..., None])) + + # TODO(zyfncg) there is a bug of dimensions when slice step > 1 and + # indexs has int type + # self.assertTrue( + # np.array_equal(var[10], np_value[0, 1:10:2, None, None, ...])) + + def _test_bool_index(self): + shape = (4, 2, 5, 64) + np_value = np.random.random(shape).astype('float32') + var_tensor = paddle.to_tensor(np_value) + index = [[True, True, True, True], [True, False, True, True], + [True, False, False, True], [False, 0, 1, True, True]] + index2d = np.array([[True, True], [False, False], [True, False], + [True, True]]) + tensor_index = paddle.to_tensor(index2d) + var = [ + var_tensor[index[0]].numpy(), + var_tensor[index[1]].numpy(), + var_tensor[index[2]].numpy(), + var_tensor[index[3]].numpy(), + var_tensor[paddle.to_tensor(index[0])].numpy(), + var_tensor[tensor_index].numpy(), + ] + self.assertTrue(np.array_equal(var[0], np_value[index[0]])) + self.assertTrue(np.array_equal(var[1], np_value[index[1]])) + self.assertTrue(np.array_equal(var[2], np_value[index[2]])) + self.assertTrue(np.array_equal(var[3], np_value[index[3]])) + self.assertTrue(np.array_equal(var[4], np_value[index[0]])) + self.assertTrue(np.array_equal(var[5], np_value[index2d])) + self.assertTrue( + np.array_equal(var_tensor[var_tensor > 0.67], np_value[np_value > + 0.67])) + self.assertTrue( + np.array_equal(var_tensor[var_tensor < 0.55], np_value[np_value < + 0.55])) + + with self.assertRaises(ValueError): + var_tensor[[False, False, False, False]] + with self.assertRaises(ValueError): + var_tensor[[True, False]] + with self.assertRaises(ValueError): + var_tensor[[True, False, False, False, False]] 
+ with self.assertRaises(IndexError): + var_tensor[paddle.to_tensor([[True, False, False, False]])] + def _test_for_var(self): np_value = np.random.random((30, 100, 100)).astype('float32') w = fluid.dygraph.to_variable(np_value) @@ -483,7 +782,11 @@ class TestVarBase(unittest.TestCase): def test_slice(self): with fluid.dygraph.guard(): self._test_slice() + self._test_slice_for_tensor_attr() self._test_for_var() + self._test_for_getitem_ellipsis_index() + self._test_none_index() + self._test_bool_index() var = fluid.dygraph.to_variable(self.array) self.assertTrue(np.array_equal(var[1, :].numpy(), self.array[1, :])) @@ -631,6 +934,69 @@ class TestVarBase(unittest.TestCase): self.assertEqual(a_str, expected) paddle.enable_static() + def test_tensor_str_shape_with_zero(self): + paddle.disable_static(paddle.CPUPlace()) + x = paddle.ones((10, 10)) + y = paddle.fluid.layers.where(x == 0) + a_str = str(y) + + expected = '''Tensor(shape=[0, 2], dtype=int64, place=CPUPlace, stop_gradient=True, + [])''' + + self.assertEqual(a_str, expected) + paddle.enable_static() + + def test_tensor_str_linewidth(self): + paddle.disable_static(paddle.CPUPlace()) + paddle.seed(2021) + x = paddle.rand([128]) + paddle.set_printoptions( + precision=4, threshold=1000, edgeitems=3, linewidth=80) + a_str = str(x) + + expected = '''Tensor(shape=[128], dtype=float32, place=CPUPlace, stop_gradient=True, + [0.3759, 0.0278, 0.2489, 0.3110, 0.9105, 0.7381, 0.1905, 0.4726, 0.2435, + 0.9142, 0.3367, 0.7243, 0.7664, 0.9915, 0.2921, 0.1363, 0.8096, 0.2915, + 0.9564, 0.9972, 0.2573, 0.2597, 0.3429, 0.2484, 0.9579, 0.7003, 0.4126, + 0.4274, 0.0074, 0.9686, 0.9910, 0.0144, 0.6564, 0.2932, 0.7114, 0.9301, + 0.6421, 0.0538, 0.1273, 0.5771, 0.9336, 0.6416, 0.1832, 0.9311, 0.7702, + 0.7474, 0.4479, 0.3382, 0.5579, 0.0444, 0.9802, 0.9874, 0.3038, 0.5640, + 0.2408, 0.5489, 0.8866, 0.1006, 0.5881, 0.7560, 0.7928, 0.8604, 0.4670, + 0.9285, 0.1482, 0.4541, 0.1307, 0.6221, 0.4902, 0.1147, 0.4415, 0.2987, + 0.7276, 0.2077, 0.7551, 0.9652, 0.4369, 0.2282, 0.0047, 0.2934, 0.4308, + 0.4190, 0.1442, 0.3650, 0.3056, 0.6535, 0.1211, 0.8721, 0.7408, 0.4220, + 0.5937, 0.3123, 0.9198, 0.0275, 0.5338, 0.4622, 0.7521, 0.3609, 0.4703, + 0.1736, 0.8976, 0.7616, 0.3756, 0.2416, 0.2907, 0.3246, 0.4305, 0.5717, + 0.0735, 0.0361, 0.5534, 0.4399, 0.9260, 0.6525, 0.3064, 0.4573, 0.9210, + 0.8269, 0.2424, 0.7494, 0.8945, 0.7098, 0.8078, 0.4707, 0.5715, 0.7232, + 0.4678, 0.5047])''' + + self.assertEqual(a_str, expected) + paddle.enable_static() + + def test_tensor_str_linewidth2(self): + paddle.disable_static(paddle.CPUPlace()) + paddle.seed(2021) + x = paddle.rand([128]) + paddle.set_printoptions(precision=4, linewidth=160, sci_mode=True) + a_str = str(x) + + expected = '''Tensor(shape=[128], dtype=float32, place=CPUPlace, stop_gradient=True, + [3.7587e-01, 2.7798e-02, 2.4891e-01, 3.1097e-01, 9.1053e-01, 7.3811e-01, 1.9045e-01, 4.7258e-01, 2.4354e-01, 9.1415e-01, 3.3666e-01, 7.2428e-01, + 7.6640e-01, 9.9146e-01, 2.9215e-01, 1.3625e-01, 8.0957e-01, 2.9153e-01, 9.5642e-01, 9.9718e-01, 2.5732e-01, 2.5973e-01, 3.4292e-01, 2.4841e-01, + 9.5794e-01, 7.0029e-01, 4.1260e-01, 4.2737e-01, 7.3788e-03, 9.6863e-01, 9.9102e-01, 1.4416e-02, 6.5640e-01, 2.9318e-01, 7.1136e-01, 9.3008e-01, + 6.4209e-01, 5.3849e-02, 1.2730e-01, 5.7712e-01, 9.3359e-01, 6.4155e-01, 1.8320e-01, 9.3110e-01, 7.7021e-01, 7.4736e-01, 4.4793e-01, 3.3817e-01, + 5.5794e-01, 4.4412e-02, 9.8023e-01, 9.8735e-01, 3.0376e-01, 5.6397e-01, 2.4082e-01, 5.4893e-01, 8.8659e-01, 1.0065e-01, 5.8812e-01, 7.5600e-01, + 
7.9280e-01, 8.6041e-01, 4.6701e-01, 9.2852e-01, 1.4821e-01, 4.5410e-01, 1.3074e-01, 6.2210e-01, 4.9024e-01, 1.1466e-01, 4.4154e-01, 2.9868e-01, + 7.2758e-01, 2.0766e-01, 7.5508e-01, 9.6522e-01, 4.3688e-01, 2.2823e-01, 4.7394e-03, 2.9342e-01, 4.3083e-01, 4.1902e-01, 1.4416e-01, 3.6500e-01, + 3.0560e-01, 6.5350e-01, 1.2115e-01, 8.7206e-01, 7.4081e-01, 4.2203e-01, 5.9372e-01, 3.1230e-01, 9.1979e-01, 2.7486e-02, 5.3383e-01, 4.6224e-01, + 7.5211e-01, 3.6094e-01, 4.7034e-01, 1.7355e-01, 8.9763e-01, 7.6165e-01, 3.7557e-01, 2.4157e-01, 2.9074e-01, 3.2458e-01, 4.3049e-01, 5.7171e-01, + 7.3509e-02, 3.6087e-02, 5.5341e-01, 4.3993e-01, 9.2601e-01, 6.5248e-01, 3.0640e-01, 4.5727e-01, 9.2104e-01, 8.2688e-01, 2.4243e-01, 7.4937e-01, + 8.9448e-01, 7.0981e-01, 8.0783e-01, 4.7065e-01, 5.7154e-01, 7.2319e-01, 4.6777e-01, 5.0465e-01])''' + + self.assertEqual(a_str, expected) + paddle.enable_static() + def test_print_tensor_dtype(self): paddle.disable_static(paddle.CPUPlace()) a = paddle.rand([1]) diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index 778a391df6..f640882893 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -17,7 +17,7 @@ import numpy as np from paddle.fluid.layers import core from paddle.fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype -__all__ = ['set_printoptions'] +__all__ = [] class PrintOptions(object): @@ -34,15 +34,18 @@ DEFAULT_PRINT_OPTIONS = PrintOptions() def set_printoptions(precision=None, threshold=None, edgeitems=None, - sci_mode=None): + sci_mode=None, + linewidth=None): """Set the printing options for Tensor. NOTE: The function is similar with numpy.set_printoptions() Args: precision (int, optional): Number of digits of the floating number, default 8. threshold (int, optional): Total number of elements printed, default 1000. - edgeitems (int, optional): Number of elements in summary at the begining and end of each dimension, defalt 3. + edgeitems (int, optional): Number of elements in summary at the begining and ending of each dimension, default 3. sci_mode (bool, optional): Format the floating number with scientific notation or not, default False. + linewidth (int, optional): Number of characters each line, default 80. + Returns: None. 
@@ -82,32 +85,39 @@ def set_printoptions,
         check_type(edgeitems, 'edgeitems', (int), 'set_printoptions')
         DEFAULT_PRINT_OPTIONS.edgeitems = edgeitems
         kwargs['edgeitems'] = edgeitems
+    if linewidth is not None:
+        check_type(linewidth, 'linewidth', (int), 'set_printoptions')
+        DEFAULT_PRINT_OPTIONS.linewidth = linewidth
+        kwargs['linewidth'] = linewidth
     if sci_mode is not None:
         check_type(sci_mode, 'sci_mode', (bool), 'set_printoptions')
         DEFAULT_PRINT_OPTIONS.sci_mode = sci_mode
         kwargs['sci_mode'] = sci_mode
-    #TODO(zhiqiu): support linewidth
     core.set_printoptions(**kwargs)
 
 
-def _to_sumary(var):
+def _to_summary(var):
     edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems
 
+    # Handle tensors whose shape contains 0, e.g. [0, 2] or [3, 0, 3]
+    if np.prod(var.shape) == 0:
+        return np.array([])
+
     if len(var.shape) == 0:
         return var
     elif len(var.shape) == 1:
         if var.shape[0] > 2 * edgeitems:
-            return np.concatenate([var[:edgeitems], var[-edgeitems:]])
+            return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])
         else:
             return var
     else:
         # recursively handle all dimensions
         if var.shape[0] > 2 * edgeitems:
             begin = [x for x in var[:edgeitems]]
-            end = [x for x in var[-edgeitems:]]
-            return np.stack([_to_sumary(x) for x in (begin + end)])
+            end = [x for x in var[(-1 * edgeitems):]]
+            return np.stack([_to_summary(x) for x in (begin + end)])
         else:
-            return np.stack([_to_sumary(x) for x in var])
+            return np.stack([_to_summary(x) for x in var])
 
 
 def _format_item(np_var, max_width=0, signed=False):
@@ -136,6 +146,7 @@ def _format_item(np_var, max_width=0, signed=False):
 
 
 def _get_max_width(var):
+    # return max_width for a scalar
     max_width = 0
     signed = False
     for item in list(var.flatten()):
@@ -147,41 +158,61 @@ def _get_max_width(var):
     return max_width, signed
 
 
-def _format_tensor(var, sumary, indent=0, max_width=0, signed=False):
+def _format_tensor(var, summary, indent=0, max_width=0, signed=False):
+    """
+    Format a tensor
+
+    Args:
+        var(Tensor): The tensor to be formatted.
+        summary(bool): Whether to summarize. If True, some elements will not be printed and are replaced with "...".
+        indent(int): The indent of each line.
+        max_width(int): The max width of each element in var.
+        signed(bool): Print +/- or not.
+    """
     edgeitems = DEFAULT_PRINT_OPTIONS.edgeitems
+    linewidth = DEFAULT_PRINT_OPTIONS.linewidth
 
     if len(var.shape) == 0:
         # currently, shape = [], i.e., scalar tensor is not supported.
         # If it is supported, it should be formatted like this.
return _format_item(var, max_width, signed) elif len(var.shape) == 1: - if sumary and var.shape[0] > 2 * edgeitems: + item_length = max_width + 2 + items_per_line = (linewidth - indent) // item_length + items_per_line = max(1, items_per_line) + + if summary and var.shape[0] > 2 * edgeitems: items = [ _format_item(item, max_width, signed) - for item in list(var)[:DEFAULT_PRINT_OPTIONS.edgeitems] + for item in list(var)[:edgeitems] ] + ['...'] + [ _format_item(item, max_width, signed) - for item in list(var)[-DEFAULT_PRINT_OPTIONS.edgeitems:] + for item in list(var)[(-1 * edgeitems):] ] else: items = [ _format_item(item, max_width, signed) for item in list(var) ] - s = ', '.join(items) + lines = [ + items[i:i + items_per_line] + for i in range(0, len(items), items_per_line) + ] + s = (',\n' + ' ' * + (indent + 1)).join([', '.join(line) for line in lines]) return '[' + s + ']' else: # recursively handle all dimensions - if sumary and var.shape[0] > 2 * edgeitems: + if summary and var.shape[0] > 2 * edgeitems: vars = [ - _format_tensor(x, sumary, indent + 1, max_width, signed) + _format_tensor(x, summary, indent + 1, max_width, signed) for x in var[:edgeitems] ] + ['...'] + [ - _format_tensor(x, sumary, indent + 1, max_width, signed) - for x in var[-edgeitems:] + _format_tensor(x, summary, indent + 1, max_width, signed) + for x in var[(-1 * edgeitems):] ] else: vars = [ - _format_tensor(x, sumary, indent + 1, max_width, signed) + _format_tensor(x, summary, indent + 1, max_width, signed) for x in var ] @@ -207,14 +238,14 @@ def to_string(var, prefix='Tensor'): for dim in var.shape: size *= dim - sumary = False + summary = False if size > DEFAULT_PRINT_OPTIONS.threshold: - sumary = True + summary = True - max_width, signed = _get_max_width(_to_sumary(np_var)) + max_width, signed = _get_max_width(_to_summary(np_var)) data = _format_tensor( - np_var, sumary, indent=indent, max_width=max_width, signed=signed) + np_var, summary, indent=indent, max_width=max_width, signed=signed) return _template.format( prefix=prefix, -- Gitee
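
For reference, a minimal usage sketch of the linewidth option that this patch adds to paddle.set_printoptions. Every call below mirrors the new tests above (test_tensor_str_linewidth and test_tensor_str_linewidth2); the printed output is exactly the expectation those tests assert, assuming a Paddle build that includes this change.

import paddle

paddle.disable_static(paddle.CPUPlace())
paddle.seed(2021)

x = paddle.rand([128])

# Wrap printed elements at 80 characters per line (the default width), 4-digit precision.
paddle.set_printoptions(precision=4, threshold=1000, edgeitems=3, linewidth=80)
print(x)

# Wider lines with scientific notation, as exercised in test_tensor_str_linewidth2.
paddle.set_printoptions(precision=4, linewidth=160, sci_mode=True)
print(x)

paddle.enable_static()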