From df9baa630023087ae2227dc24c691e74d84e7a8c Mon Sep 17 00:00:00 2001
From: PaddlePaddle-Gardener
Date: Fri, 14 Jan 2022 14:21:25 +0800
Subject: [PATCH] mirgate_38881

---
 .../unittests/test_imperative_numpy_bridge.py     |  1 +
 .../test_imperative_using_non_zero_gpu.py         | 17 ++++++++++++++---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py
index 4f3089baff..7b8d31ff03 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py
@@ -42,6 +42,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
             self.assertEqual(data_np[0][0], -1)
             if _in_eager_mode():
                 # eager_mode, var2 is EagerTensor, is not subscriptable
+                # TODO(wuweilong): to support slice in eager mode later
                 self.assertNotEqual(var2.numpy()[0][0], -1)
             else:
                 self.assertNotEqual(var2[0][0].numpy()[0], -1)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
index f2dfaef397..46a89efcec 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import paddle
 import paddle.fluid as fluid
 import unittest
-from paddle.fluid.dygraph import to_variable, Embedding, guard
+from paddle.fluid.dygraph import to_variable, guard
 import numpy as np
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestImperativeUsingNonZeroGpu(unittest.TestCase):
@@ -24,12 +26,21 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
             var = to_variable(np_arr)
             self.assertTrue(np.array_equal(np_arr, var.numpy()))
 
-    def test_non_zero_gpu(self):
+    def func_non_zero_gpu(self):
         if not fluid.is_compiled_with_cuda():
             return
 
         np_arr = np.random.random([11, 13]).astype('float32')
-        self.run_main(np_arr, fluid.CUDAPlace(0))
+        if paddle.device.cuda.device_count() > 1:
+            # should use non zero gpu if there are more than 1 gpu
+            self.run_main(np_arr, fluid.CUDAPlace(1))
+        else:
+            self.run_main(np_arr, fluid.CUDAPlace(0))
+
+    def test_non_zero_gpu(self):
+        with _test_eager_guard():
+            self.func_non_zero_gpu()
+        self.func_non_zero_gpu()
 
 
 if __name__ == '__main__':
-- 
Gitee