Skip to content

Commit

Permalink
Clean code and update doc.
Browse files Browse the repository at this point in the history
  • Loading branch information
qingqing01 committed Oct 30, 2017
1 parent 1d7c03e commit 2c5d4c6
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 21 deletions.
10 changes: 5 additions & 5 deletions paddle/operators/lstm_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -126,11 +126,11 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
" - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.")
.AsDispensable();
AddOutput("Hidden",
"(LoDTensor) the hidden state lod tensor of LSTM operator. "
"The shape and lod is the same with the `Input`.");
"(LoDTensor) the hidden state of LSTM operator. "
"The shape is (T x D), and the lod is the same as the `Input`.");
AddOutput("Cell",
"(LoDTensor) the cell state lod tensor of LSTM operator. "
"The shape and lod is the same with the `Input`.");
"(LoDTensor) the cell state of LSTM operator. "
"The shape is (T x D), and the lod is the same as the `Input`.");
AddOutput("BatchGate",
"(LoDTensor) This LoDTensor contains input gate, forget gate "
"and output gate after the nonlinear computation. This "
Expand All @@ -141,7 +141,7 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker {
"in the raw input.")
.AsIntermediate();
AddOutput("BatchCellPreAct",
"(LoDTensor) This LoDTensor is get in the forward and used "
"(LoDTensor) This LoDTensor is obtained in the forward pass and used "
"in the backward.")
.AsIntermediate();
AddAttr<bool>("usePeepholes",
Expand Down
14 changes: 1 addition & 13 deletions paddle/operators/lstm_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,6 @@ class LSTMGradKernel : public framework::OpKernel<T> {
auto* batch_cell_pre_act = ctx.Input<LoDTensor>("BatchCellPreAct");

auto* hidden_g = ctx.Input<LoDTensor>(framework::GradVarName("Hidden"));
// auto* cell_g = ctx.Input<LoDTensor>(framework::GradVarName("Cell"));

auto* in_g = ctx.Output<LoDTensor>(framework::GradVarName("Input"));
auto* weight_g = ctx.Output<Tensor>(framework::GradVarName("Weight"));
Expand Down Expand Up @@ -251,7 +250,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
lstm_grad.gateGrad = gate_g.data<T>();
lstm_grad.outputGrad = out_g.data<T>();

if (n != 0) {
if (n) {
int bstart_pre = static_cast<int>(batch_starts[n - 1]);
Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart);
Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart);
Expand Down Expand Up @@ -292,17 +291,6 @@ class LSTMGradKernel : public framework::OpKernel<T> {
}
if (bias && bias_g) {
/* backward bias */
// Following Eigen computation failed for double type on GPU device.
// bias_g->mutable_data<T>(ctx.GetPlace());
// Tensor bias_mat;
// bias_mat.ShareDataWith(*bias_g);
// bias_mat.Resize({1, 4 * frame_size});

// auto bias_g_e = EigenVector<T>::Flatten(bias_mat);
// auto gate_g_e = EigenMatrix<T>::From(batch_gate_g);
// Eigen::array<int, 1> dims{{0}};
// bias_g_e.device(ctx.GetEigenDevice<Place>()) = gate_g_e.sum(dims);

int m = static_cast<int>(batch_gate_g.dims()[0]);
int n = static_cast<int>(batch_gate_g.dims()[1]);

Expand Down
8 changes: 5 additions & 3 deletions python/paddle/v2/framework/tests/test_lstm_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,9 +161,11 @@ def test_check_output(self):

#TODO(qingqing) add more unit testing case
def test_check_grad(self):
# TODO(qingqing) remove following two lines after the check_grad is refined.
self.outputs['BatchGate'] = None
self.outputs['BatchCellPreAct'] = None
# TODO(qingqing) remove following lines after the check_grad is refined.
N = len(self.lod[0]) - 1
self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
self.outputs['BatchCellPreAct'] = np.zeros(
(N, self.D)).astype('float64')
self.check_grad(
['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=0.02)

Expand Down

0 comments on commit 2c5d4c6

Please sign in to comment.