add new API/OP: paddle.poisson (#38117)
* add new API/OP:paddle.poisson

* fix comment
zhwesky2010 authored Dec 24, 2021
1 parent 7339a12 commit bcf86e5
Showing 12 changed files with 506 additions and 18 deletions.
132 changes: 132 additions & 0 deletions paddle/fluid/operators/poisson_op.cc
@@ -0,0 +1,132 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>

#include "paddle/fluid/operators/poisson_op.h"

namespace paddle {
namespace operators {

class PoissonOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "PoissonOp");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "PoissonOp");

auto dim = ctx->GetInputDim("X");
ctx->SetOutputDim("Out", dim);
}

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
}
};

class PoissonOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor) The input tensor of poisson op");
AddOutput("Out",
"The output tensor of poisson op, it has the same shape and "
"dtype with input. Each element corresponds to input tensor");
AddComment(R"DOC(
This operator generates random values that obey the Poisson distribution; each
element of the input tensor X is used as the rate (lambda) of the corresponding draw.
)DOC");
}
};

class PoissonOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
protected:
std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
const override {
static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
return m;
}
};

template <typename T>
class PoissonKernel<platform::CPUDeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<framework::Tensor>("X");
auto *out = ctx.Output<framework::Tensor>("Out");

const T *x_data = x->data<T>();
T *out_data = out->mutable_data<T>(ctx.GetPlace());

int64_t size = x->numel();

auto gen = framework::DefaultCPUGenerator();
auto engine = gen->GetCPUEngine();

for (int64_t i = 0; i < size; ++i) {
std::poisson_distribution<> dist(x_data[i]);
out_data[i] = static_cast<T>(dist(*engine));
}
}
};

class PoissonGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
"Out_Grad", "PoissonGradOp");

auto dout_dim = ctx->GetInputDim(framework::GradVarName("Out"));
ctx->SetOutputDim(framework::GradVarName("X"), dout_dim);
}
};

template <typename T>
class PoissonGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("poisson_grad");
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OPERATOR(poisson, ops::PoissonOp, ops::PoissonOpMaker,
ops::PoissonOpInferVarType,
ops::PoissonGradOpMaker<paddle::framework::OpDesc>,
ops::PoissonGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(poisson_grad, ops::PoissonGradOp);

REGISTER_OP_CPU_KERNEL(poisson,
ops::PoissonKernel<plat::CPUDeviceContext, float>,
ops::PoissonKernel<plat::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(poisson_grad,
ops::PoissonGradKernel<plat::CPUDeviceContext, float>,
ops::PoissonGradKernel<plat::CPUDeviceContext, double>);
92 changes: 92 additions & 0 deletions paddle/fluid/operators/poisson_op.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef __NVCC__
#include <curand_kernel.h>
#endif
#ifdef __HIPCC__
#include <hiprand_kernel.h>
#endif
#include "paddle/fluid/operators/poisson_op.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

template <typename T>
struct PoissonCudaFunctor {
public:
PoissonCudaFunctor(const T* in, T* out, unsigned int seed,
unsigned int offset)
: in_(in), out_(out), seed_(seed), offset_(offset) {}

__device__ void operator()(int64_t idx) {
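// One Philox4x32-10 state per output element: common seed, subsequence = idx.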
#ifdef __NVCC__
curandStatePhilox4_32_10_t state;
curand_init(seed_, idx, offset_, &state);
out_[idx] = static_cast<T>(curand_poisson(&state, in_[idx]));
#elif __HIPCC__
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed_, idx, offset_, &state);
out_[idx] = static_cast<T>(hiprand_poisson(&state, in_[idx]));
#endif
}

private:
const T* in_;
T* out_;
const unsigned int seed_;
const unsigned int offset_;
};

template <typename T>
class PoissonKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* x = ctx.Input<framework::Tensor>("X");
auto* out = ctx.Output<framework::Tensor>("Out");

const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
auto size = x->numel();
int64_t device_id =
BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()).GetDeviceId();

auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
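// Advance the default generator's offset so later launches draw fresh streams.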
auto seed_offset = gen_cuda->IncrementOffset(20);
uint64_t seed = seed_offset.first;
uint64_t offset = seed_offset.second;

auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, size);

PoissonCudaFunctor<T> functor(x_data, out_data, seed, offset);
for_range(functor);
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(poisson,
ops::PoissonKernel<plat::CUDADeviceContext, float>,
ops::PoissonKernel<plat::CUDADeviceContext, double>);

REGISTER_OP_CUDA_KERNEL(
poisson_grad, ops::PoissonGradKernel<plat::CUDADeviceContext, float>,
ops::PoissonGradKernel<plat::CUDADeviceContext, double>);
41 changes: 41 additions & 0 deletions paddle/fluid/operators/poisson_op.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class PoissonKernel;

template <typename DeviceContext, typename T>
class PoissonGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
math::SetConstant<DeviceContext, T> functor;
auto& dev_ctx = ctx.template device_context<DeviceContext>();
functor(dev_ctx, dx, static_cast<T>(0));
}
};

} // namespace operators
} // namespace paddle
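Since the grad kernel simply fills dX with a constant zero, gradients do not flow through `paddle.poisson`. A small sketch of the observable behavior:

```python
import paddle

x = paddle.rand([3])
x.stop_gradient = False
y = paddle.poisson(x)
y.sum().backward()
print(x.grad)  # all zeros, per PoissonGradKernel's SetConstant(0)
```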
5 changes: 2 additions & 3 deletions paddle/fluid/operators/uniform_random_op.cc
Original file line number Diff line number Diff line change
@@ -27,7 +27,7 @@ namespace {
template <typename T>
inline void UniformRealDistribution(T *data, const int64_t &size,
const float &min, const float &max,
const unsigned int &seed) {
const unsigned int seed) {
VLOG(4) << "[CPU] UniformRandomKernel<T>";
std::uniform_real_distribution<T> dist(static_cast<T>(min),
static_cast<T>(max));
@@ -41,8 +41,7 @@ inline void UniformRealDistribution(T *data, const int64_t &size,
template <>
inline void UniformRealDistribution(paddle::platform::bfloat16 *data,
const int64_t &size, const float &min,
const float &max,
const unsigned int &seed) {
const float &max, const unsigned int seed) {
VLOG(4) << "[CPU] UniformRandomKernel<bfloat16>";
std::uniform_real_distribution<float> dist(min, max);
auto engine = paddle::framework::GetCPURandomEngine(seed);
2 changes: 1 addition & 1 deletion paddle/scripts/paddle_build.sh
@@ -575,7 +575,7 @@ EOF
export http_proxy=
export https_proxy=
set -x

set +ex
if [ "$1" == "cp36-cp36m" ]; then
pip3.6 uninstall -y paddlepaddle
5 changes: 3 additions & 2 deletions python/paddle/__init__.py
@@ -64,8 +64,6 @@
import paddle.static # noqa: F401
import paddle.vision # noqa: F401

-from .tensor.random import bernoulli # noqa: F401
-
from .tensor.attribute import is_complex # noqa: F401
from .tensor.attribute import is_integer # noqa: F401
from .tensor.attribute import rank # noqa: F401
@@ -248,6 +246,8 @@
from .tensor.math import fmax # noqa: F401
from .tensor.math import fmin # noqa: F401

+from .tensor.random import bernoulli # noqa: F401
+from .tensor.random import poisson # noqa: F401
from .tensor.random import multinomial # noqa: F401
from .tensor.random import standard_normal # noqa: F401
from .tensor.random import normal # noqa: F401
@@ -488,6 +488,7 @@
'exp',
'expm1',
'bernoulli',
+'poisson',
'sinh',
'round',
'DataParallel',
4 changes: 2 additions & 2 deletions python/paddle/fluid/initializer.py
@@ -1152,12 +1152,12 @@ def calculate_gain(nonlinearity, param=None):
Args:
nonlinearity(str): name of nonlinearity activation function. If it is a linear function, which is one of
"linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose" , will return 1.0
"linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose" , 1.0 will be returned.
param(bool|int|float, optional): optional parameter for some nonlinearity functions. Now, it only applies to
'leaky_relu'. Default: None, in which case 0.01 is used in the formula.
Returns:
-The recommended gain value for nonlinearity function.
+A float value, which is the recommended gain for this nonlinearity function.
Examples:
.. code-block:: python
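The docstring's own `code-block` example is truncated in this diff view. For reference, a hedged sketch of calling `calculate_gain`; the leaky_relu gain formula noted below is the standard Kaiming one and is an assumption here, not quoted from this diff:

```python
import paddle

# Linear-family functions ("linear", "conv2d", ...) return 1.0.
g1 = paddle.nn.initializer.calculate_gain('conv2d')  # 1.0
# For 'leaky_relu' the gain depends on param (default 0.01);
# assuming gain = sqrt(2 / (1 + param**2)), this yields ~1.3867.
g2 = paddle.nn.initializer.calculate_gain('leaky_relu', param=0.2)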
8 changes: 2 additions & 6 deletions python/paddle/fluid/tests/unittests/test_bernoulli_op.py
@@ -32,18 +32,14 @@ class TestBernoulliOp(OpTest):
def setUp(self):
self.op_type = "bernoulli"
self.inputs = {"X": np.random.uniform(size=(1000, 784))}
-        self.init_attrs()
-        self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}

-    def init_attrs(self):
-        self.attrs = {}
-        self.output_hist = output_hist
+        self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}

def test_check_output(self):
self.check_output_customized(self.verify_output)

def verify_output(self, outs):
-        hist, prob = self.output_hist(np.array(outs[0]))
+        hist, prob = output_hist(np.array(outs[0]))
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
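The remaining changed files, presumably including the new op's unit test, are truncated below. As a hedged illustration (not the PR's actual test code), a simple moment check on the new op could look like:

```python
import numpy as np
import paddle

paddle.seed(2021)
lam = 10.0
samples = paddle.poisson(paddle.full([100000], lam)).numpy()

# For Poisson(lam), both the mean and the variance equal lam.
assert np.isclose(samples.mean(), lam, rtol=0.05)
assert np.isclose(samples.var(), lam, rtol=0.05)
```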
(The remaining changed files are not shown in this view.)
