Hsigmoid op #11063
Changes from 22 commits
paddle/fluid/operators/hierarchical_sigmoid_op.cc
@@ -0,0 +1,163 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/hierarchical_sigmoid_op.h" | ||
#include <vector> | ||
|
||
namespace paddle { | ||
namespace operators { | ||
|
||
/**
 * Organize the classes into a binary tree. At each node, a sigmoid function
 * is used to calculate the probability of belonging to the right branch.
 * This idea is from "F. Morin, Y. Bengio (AISTATS 05):
 * Hierarchical Probabilistic Neural Network Language Model."
 *
 * Here we use a simple way of building the binary tree.
 * Assuming the number of classes is C = 6, the classes are organized as
 * a binary tree in the following way:
 *
 * @code{.py}
 * *-*-*- 2
 * | | |- 3
 * | |
 * | |-*- 4
 * |   |- 5
 * |
 * |-*- 0
 *   |- 1
 * @endcode
 *
 * where * indicates an internal node, and each leaf node represents a class.
 * - Node 0 ... C-2 are internal nodes.
 * - Node C-1 ... 2C-2 are leaf nodes.
 * - Class c is represented by leaf node \f$c+C-1\f$.
 *
 * We assign an id to each node:
 * - the id of the root is 0.
 * - the left child of node i is 2*i+1.
 * - the right child of node i is 2*i+2.
 *
 * It's easy to see that:
 * - the parent of node i is \f$\left\lfloor(i-1)/2\right\rfloor\f$.
 * - the j-th level ancestor of node i is
 *   \f$\left\lfloor(i+1)/2^{j+1}\right\rfloor - 1\f$.
 * - a node i is a left child of its parent if \f$(i-1)\%2==0\f$.
 */

class HierarchicalSigmoidOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("PreOut"),
                   "Output(PreOut) should not be null.");
    const int64_t batch_size = ctx->GetInputDim("X")[0];
    std::vector<int64_t> output_shape({batch_size, 1});
    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.GetPlace());
  }
};

template <typename AttrType>
class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(Tensor, required) The input tensor with shape [N, D], "
             "where N is the size of the mini-batch and D is the "
             "embedding size.");
    AddInput("W",
             "(Tensor, required) The parameters of the hierarchical "
             "sigmoid operator, a 2-D tensor with shape "
             "[num_classes - 1, D].");
    AddInput("Ids",
             "(Tensor, required) The labels of the training data, a "
             "tensor with shape [1, N].");
    AddInput("Bias",
             "(Tensor, optional) The bias applied to the output, a "
             "tensor with shape [1, num_classes - 1].");
    AddOutput("Out",
              "(Tensor, required) The output of the hierarchical sigmoid "
              "operator, with shape [N, 1].");
    AddOutput("PreOut",
              "(Tensor, required) An intermediate 2-D tensor with shape "
              "[batch_size, code_length].")
        .AsIntermediate();
    AddAttr<AttrType>("num_classes", "(int, required) The number of classes.")
        .SetDefault(2);
    AddComment(R"DOC(
The hierarchical sigmoid operator organizes the classes into a binary tree.
At each node, a sigmoid function is used to calculate the probability of
belonging to the right branch. This idea is from
"F. Morin, Y. Bengio (AISTATS 05):
Hierarchical Probabilistic Neural Network Language Model."
)DOC");
  }
};

|
||
class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel { | ||
public: | ||
using framework::OperatorWithKernel::OperatorWithKernel; | ||
void InferShape(framework::InferShapeContext* ctx) const override { | ||
PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null."); | ||
PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) should not be null."); | ||
PADDLE_ENFORCE(ctx->HasInput("PreOut"), | ||
"Input(Preout) should not be null."); | ||
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("W")), | ||
"Output(W@Grad should not be null.)"); | ||
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X"))); | ||
if (ctx->HasOutput(framework::GradVarName("Bias"))) { | ||
ctx->SetOutputDim(framework::GradVarName("Bias"), | ||
ctx->GetInputDim("Bias")); | ||
} | ||
ctx->SetOutputDim(framework::GradVarName("W"), ctx->GetInputDim("W")); | ||
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); | ||
} | ||
|
||
protected: | ||
framework::OpKernelType GetExpectedKernelType( | ||
const framework::ExecutionContext& ctx) const override { | ||
return framework::OpKernelType( | ||
framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()), | ||
ctx.GetPlace()); | ||
} | ||
}; | ||
|
||
} // namespace operators | ||
} // namespace paddle | ||
|
||
namespace ops = paddle::operators;
REGISTER_OPERATOR(hierarchical_sigmoid, ops::HierarchicalSigmoidOp,
                  ops::HierarchicalSigmoidOpMaker<int>,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(hierarchical_sigmoid_grad, ops::HierarchicalSigmoidGradOp);
REGISTER_OP_CPU_KERNEL(hierarchical_sigmoid,
                       ops::HierarchicalSigmoidOpKernel<
                           paddle::platform::CPUDeviceContext, float>);

Review comment: Please also register the kernel with double. Reply: Done.

REGISTER_OP_CPU_KERNEL(hierarchical_sigmoid_grad,
                       ops::HierarchicalSigmoidGradOpKernel<
                           paddle::platform::CPUDeviceContext, float>);
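
The comment at the top of hierarchical_sigmoid_op.cc defines the node numbering of the class tree. As a quick illustration, here is a minimal standalone sketch (not part of the PR; the helper names are illustrative) of the same index arithmetic, assuming the 0-based ids described in that comment:

#include <cstdio>

// Index arithmetic for the complete binary tree described above:
// the root is node 0, and the children of node i are 2*i+1 and 2*i+2.
int LeftChild(int i) { return 2 * i + 1; }
int RightChild(int i) { return 2 * i + 2; }
int Parent(int i) { return (i - 1) / 2; }
bool IsLeftChild(int i) { return (i - 1) % 2 == 0; }
// With C classes, class c is stored at leaf node c + C - 1.
int LeafOfClass(int c, int num_classes) { return c + num_classes - 1; }

int main() {
  const int num_classes = 6;  // the C = 6 example from the comment
  for (int c = 0; c < num_classes; ++c) {
    int node = LeafOfClass(c, num_classes);
    std::printf("class %d -> leaf %d, parent %d, %s child\n", c, node,
                Parent(node), IsLeftChild(node) ? "left" : "right");
  }
  return 0;
}
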
paddle/fluid/operators/hierarchical_sigmoid_op.h
@@ -0,0 +1,123 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <iostream>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/clip_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include "paddle/fluid/platform/transform.h"

namespace paddle {
namespace operators {

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
using platform::Transform;

template <typename DeviceContext, typename T>
class HierarchicalSigmoidOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<framework::Tensor>("X");
    auto* w = ctx.Input<framework::Tensor>("W");
    auto* ids = ctx.Input<framework::Tensor>("Ids");
    auto* bias = ctx.Input<framework::Tensor>("Bias");
    auto* out = ctx.Output<framework::Tensor>("Out");
    auto* pre_out = ctx.Output<framework::Tensor>("PreOut");
    size_t num_classes = static_cast<size_t>(ctx.Attr<int>("num_classes"));
    int64_t code_length = math::FindLastSet(num_classes - 1);
    int64_t batch_size = in->dims()[0];
    framework::Tensor sum;
    math::SetConstant<DeviceContext, T> zero;
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto pre_out_data = pre_out->mutable_data<T>(
        framework::make_ddim({batch_size, code_length}), ctx.GetPlace());
    auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
    zero(dev_ctx, pre_out, static_cast<T>(0.0));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    math::RowwiseSum<DeviceContext, T> row_sum;
    math::MatrixBitCodeFunctor<T> bit_code(num_classes, ids->data<int64_t>());

    std::vector<int64_t> sum_dims({batch_size, 1UL});
    sum.mutable_data<T>(framework::make_ddim(sum_dims), ctx.GetPlace());
    auto sum_mat = EigenMatrix<T>::From(sum);
    out->mutable_data<T>(ctx.GetPlace());
    auto out_mat = framework::EigenVector<T>::Flatten(*out);
    if (bias) {
      bit_code.Add(pre_out, *bias);
    }
    bit_code.Mul(pre_out, *w, *in);
    // clip pre_out to the range (-40, 40)
    Transform<DeviceContext> trans;
    trans(ctx.template device_context<DeviceContext>(), pre_out_data,
          pre_out_data + pre_out->numel(), pre_out_data,
          ClipFunctor<T>(static_cast<T>(-40.0), static_cast<T>(40.0)));
    bit_code.Sum(*pre_out, out, static_cast<T>(-1));
    // softrelu with threshold 40.0
    trans(ctx.template device_context<DeviceContext>(), pre_out_data,
          pre_out_data + pre_out->numel(), pre_out_data,
          ClipFunctor<T>(static_cast<T>(-40.0), static_cast<T>(40.0)));
    pre_out_mat.device(place) = (static_cast<T>(1.0) + pre_out_mat.exp()).log();
    row_sum(dev_ctx, *pre_out, &sum);
    out_mat.device(place) = sum_mat + out_mat;
  }
};

template <typename DeviceContext, typename T>
class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<framework::Tensor>("X");
    auto* w = ctx.Input<framework::Tensor>("W");
    auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
    auto* w_grad = ctx.Output<framework::Tensor>(framework::GradVarName("W"));
    auto* bias_grad =
        ctx.Output<framework::Tensor>(framework::GradVarName("Bias"));
    auto* ids = ctx.Input<framework::Tensor>("Ids");
    auto* pre_out = ctx.Input<framework::Tensor>("PreOut");
    auto* out_grad =
        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));

    size_t num_classes = static_cast<size_t>(ctx.Attr<int>("num_classes"));
    int64_t code_length = math::FindLastSet(num_classes - 1);
    int64_t batch_size = in->dims()[0];
    framework::Tensor pre_out_grad;
    pre_out_grad.mutable_data<T>(
        framework::make_ddim({batch_size, code_length}), ctx.GetPlace());
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
    auto pre_out_grad_mat = EigenMatrix<T>::From(pre_out_grad);
    math::MatrixBitCodeFunctor<T> bit_code(num_classes, ids->data<int64_t>());
    // softrelu derivative
    bit_code.OutGrad(&pre_out_grad, *out_grad);
Review comment: It seems that the above line does a multiplication with broadcast; maybe we can use some existing functor rather than writing a new one. This computation does not seem relevant to bit_code or to the softrelu derivative. Reply: Done.
    pre_out_grad_mat.device(place) =
        pre_out_grad_mat *
        (static_cast<T>(1.0) - static_cast<T>(1.0) / pre_out_mat.exp());
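    // Note: PreOut stores softrelu(z) = log(1 + exp(z)), so the factor
    // (1 - 1 / exp(PreOut)) above equals sigmoid(z), i.e. the derivative of
    // softrelu(z) with respect to z.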
    bit_code.Sub(&pre_out_grad);
Review comment: I think a ClipGradFunctor is needed after this line, since clipping is done in the forward pass.
    if (bias_grad) {
      bias_grad->mutable_data<T>(ctx.GetPlace());
      bit_code.AddGrad(pre_out_grad, bias_grad);
    }
    in_grad->mutable_data<T>(ctx.GetPlace());
    w_grad->mutable_data<T>(ctx.GetPlace());
    bit_code.MulGradWeight(pre_out_grad, w_grad, *in);
    bit_code.MulGradError(pre_out_grad, *w, in_grad);
  }
};

}  // namespace operators
}  // namespace paddle
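
For reference, the per-sample loss computed by the forward kernel above can be written out directly. The sketch below is not part of the PR; the names PathLoss, logits, and code_bits are illustrative, and it assumes each entry z_j is the pre-activation for the j-th node on the sample's path and that code_bits[j] = 1 for the entries selected by bit_code.Sum:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Illustrative only: per-sample hierarchical sigmoid loss, matching the
// kernel's "softrelu row sum minus selected logits" formulation:
//   loss = sum_j log(1 + exp(z_j)) - sum_{j : bit_j = 1} z_j,
// which is the sum over the path of the sigmoid cross-entropy
//   -[bit_j * log(sigmoid(z_j)) + (1 - bit_j) * log(1 - sigmoid(z_j))].
double PathLoss(const std::vector<double>& logits,
                const std::vector<int>& code_bits) {
  double loss = 0.0;
  for (size_t j = 0; j < logits.size(); ++j) {
    double z = std::max(std::min(logits[j], 40.0), -40.0);  // clip, as in the kernel
    loss += std::log(1.0 + std::exp(z));                    // softrelu term
    if (code_bits[j] == 1) loss -= z;                       // selected logit
  }
  return loss;
}

int main() {
  // A toy path of length 3 with arbitrary logits and code bits.
  std::vector<double> logits = {0.3, -1.2, 2.0};
  std::vector<int> code_bits = {1, 0, 1};
  std::printf("loss = %f\n", PathLoss(logits, code_bits));
  return 0;
}
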
Review comment: Maybe [N, D] is better than [N * D] for the shape of Input(X). Reply: Done.