From a9f36ba2fe857d55b3c5d6c33665da8b33a8e847 Mon Sep 17 00:00:00 2001 From: PaddlePaddle-Gardener Date: Thu, 13 Jan 2022 14:22:46 +0800 Subject: [PATCH] mirgate_38888 --- paddle/fluid/operators/complex_op.h | 111 ++++++++++++++++++++++ paddle/fluid/operators/label_smooth_op.cu | 4 +- paddle/fluid/operators/lgamma_op.cu | 2 +- paddle/fluid/operators/matrix_rank_op.h | 6 +- 4 files changed, 117 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/complex_op.h b/paddle/fluid/operators/complex_op.h index e69de29bb2..3dd5ea9f7e 100644 --- a/paddle/fluid/operators/complex_op.h +++ b/paddle/fluid/operators/complex_op.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" +#include "paddle/fluid/operators/math/complex_functors.h" +#include "paddle/fluid/platform/complex.h" + +namespace paddle { +namespace operators { + +// functors to use with ElementwiseComputeEx +template +struct RealAndImagToComplexFunctor { + inline HOSTDEVICE platform::complex operator()(const T x, const T y) { + return platform::complex(x, y); + } +}; + +template +struct ImagAndRealToComplexFunctor { + inline HOSTDEVICE platform::complex operator()(const T y, const T x) { + return platform::complex(x, y); + } +}; + +template +struct ComplexGradForRealFunctor { + inline HOSTDEVICE T operator()(const T x, const T y, + const platform::complex out, + const platform::complex dout) { + return dout.real; + } +}; + +template +struct ComplexGradForImagFunctor { + inline HOSTDEVICE T operator()(const T x, const T y, + const platform::complex out, + const platform::complex dout) { + return dout.imag; + } +}; + +template +class ComplexKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const auto* x = ctx.Input("X"); + const auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); + + using C = platform::complex; + z->mutable_data(ctx.GetPlace()); + +// NOTE(chenfeiyu): be careful of the caveats of calling elementwise-related +// facility functions +#if defined(__NVCC__) || defined(__HIPCC__) + ElementwiseComputeEx, DeviceContext, T, C>( + ctx, x, y, /*axis*/ -1, RealAndImagToComplexFunctor(), z); +#else + auto x_dims = x->dims(); + auto y_dims = y->dims(); + if (x_dims.size() >= y_dims.size()) { + ElementwiseComputeEx, DeviceContext, T, C>( + ctx, x, y, /*axis*/ -1, RealAndImagToComplexFunctor(), z); + } else { + ElementwiseComputeEx, DeviceContext, T, C>( + ctx, x, y, /*axis*/ -1, ImagAndRealToComplexFunctor(), z); + } +#endif + } +}; + +template +class 
ComplexGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* dout = ctx.Input(framework::GradVarName("Out")); + auto* dx = ctx.Output(framework::GradVarName("X")); + auto* dy = ctx.Output(framework::GradVarName("Y")); + using C = platform::complex; + + // skip out in a hacky way + auto* out = dout; + ElemwiseGradCompute, + ComplexGradForImagFunctor, C>( + ctx, *x, *y, *out, *dout, /*axis*/ -1, dx, dy, + ComplexGradForRealFunctor(), ComplexGradForImagFunctor()); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/label_smooth_op.cu b/paddle/fluid/operators/label_smooth_op.cu index 2e7d1de3bd..2c7a08de0f 100644 --- a/paddle/fluid/operators/label_smooth_op.cu +++ b/paddle/fluid/operators/label_smooth_op.cu @@ -28,7 +28,7 @@ struct LabelSmoothFunctor { label_dim = static_cast(label_dim_data); } - __device__ __forceinline__ T operator()(const T& x) const { + __device__ __forceinline__ T operator()(const T x) const { return (static_cast(1 - epsilon) * x + static_cast(epsilon / label_dim)); } @@ -42,7 +42,7 @@ struct LabelSmoothGradFunctor { epsilon = static_cast(epsilon_data); } - __device__ __forceinline__ T operator()(const T& x) const { + __device__ __forceinline__ T operator()(const T x) const { return static_cast(1 - epsilon) * x; } }; diff --git a/paddle/fluid/operators/lgamma_op.cu b/paddle/fluid/operators/lgamma_op.cu index baf86c99b5..da40518d9b 100644 --- a/paddle/fluid/operators/lgamma_op.cu +++ b/paddle/fluid/operators/lgamma_op.cu @@ -21,7 +21,7 @@ namespace operators { template struct CudaLgammaFunctor { - __device__ __forceinline__ T operator()(const T& x) const { + __device__ __forceinline__ T operator()(const T x) const { return Eigen::numext::lgamma(x); } }; diff --git a/paddle/fluid/operators/matrix_rank_op.h 
b/paddle/fluid/operators/matrix_rank_op.h
index 7fa7436833..c3d99a21b7 100644
--- a/paddle/fluid/operators/matrix_rank_op.h
+++ b/paddle/fluid/operators/matrix_rank_op.h
@@ -48,17 +48,17 @@ static DDim RemoveLastDim(const DDim& dim) {
 
 template <typename T>
 struct GreaterThanFunctor {
-  HOSTDEVICE int operator()(const T& a, const T& b) const { return a > b; }
+  HOSTDEVICE int operator()(const T a, const T b) const { return a > b; }
 };
 
 template <typename T>
 struct LessThanFunctor {
-  HOSTDEVICE int operator()(const T& a, const T& b) const { return a < b; }
+  HOSTDEVICE int operator()(const T a, const T b) const { return a < b; }
 };
 
 template <typename T>
 struct GreaterElementFunctor {
-  HOSTDEVICE T operator()(const T& a, const T& b) const {
+  HOSTDEVICE T operator()(const T a, const T b) const {
     if (a > b) {
       return a;
     } else {
-- 
Gitee