From cc50ca9ea776d545a7412fd5b7b7fb632fe4f8e1 Mon Sep 17 00:00:00 2001
From: superjom
Date: Wed, 11 Oct 2017 16:21:36 -0400
Subject: [PATCH] clean code

---
 paddle/operators/dynamic_recurrent_op.cc | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc
index 0ece381cdae3c..ef2bb5e05f614 100644
--- a/paddle/operators/dynamic_recurrent_op.cc
+++ b/paddle/operators/dynamic_recurrent_op.cc
@@ -96,7 +96,6 @@ void DynamicRecurrentOp::Run(const Scope& scope,
 
   // call stepnet in all the time steps
   for (size_t step = 0; step < cache_.num_steps; step++) {
-    LOG(INFO) << "run step " << step;
     auto& step_scope = cache_.GetScope(step);
     stepnet_->Run(step_scope, dev_ctx);
   }
@@ -112,23 +111,10 @@ void DynamicRecurrentOp::SplitInputs() const {
     const auto& var = item.second;
     const auto& tensor = var->Get<LoDTensor>();
     TensorArray& ta = step_inputs_[item.first];
-    // NOTE only for debug
-    LOG(INFO) << "unpack lod " << tensor.dims();
-    LOG(INFO) << "lod: ";
-    for (auto& vec : tensor.lod()) {
-      for (auto i : vec) {
-        LOG(INFO) << i;
-      }
-    }
 
     dy_seq_metas_[item.first] =
         ta.Unpack(tensor, level, true /*length_descend*/);
 
-    // NOTE for debug
-    for (size_t i = 0; i < ta.size(); i++) {
-      LOG(INFO) << i << "-th tensor " << ta.Read(i).dims();
-    }
-
     if (cache_.num_steps) {
       PADDLE_ENFORCE_EQ(ta.size(), cache_.num_steps,
                         "inputs should have the same steps");
@@ -261,7 +247,6 @@ void DynamicRecurrentOp::LinkState(const rnn::MemoryAttr& memory,
 
   LoDTensor* pre_state{nullptr};
   if (step == 0) {
-    LOG(INFO) << "init 0-th prestate";
     pre_state = cache_.GetTensor(*cache_.scope, memory.boot_var);
     pre_state->mutable_data<float>(platform::CPUPlace());
     // allocate memory
@@ -270,7 +255,6 @@ void DynamicRecurrentOp::LinkState(const rnn::MemoryAttr& memory,
     detail::ReorderBootState(some_meta, *pre_state, &state_pre,
                              pre_state->place());
   } else {
-    LOG(INFO) << "init " << step << "-th prestate";
     pre_state = cache_.GetTensor(cache_.GetScope(step - 1), memory.var);
   }
 
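
A possible follow-up, sketched here rather than included in the patch: if the step-level tracing deleted above turns out to be useful again, glog (the library behind the LOG(INFO) calls in this file) provides VLOG, which keeps such messages compiled in but silent unless verbose logging is enabled at runtime. The following is a minimal standalone sketch assuming standard glog and nothing Paddle-specific; num_steps is a hypothetical stand-in for cache_.num_steps.

#include <glog/logging.h>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);  // set up glog for this process
  // VLOG(n) emits only when the active verbosity is >= n; the default
  // verbosity is 0, so these messages are silent in normal runs.
  const int num_steps = 3;  // stand-in for cache_.num_steps
  for (int step = 0; step < num_steps; ++step) {
    VLOG(2) << "run step " << step;
  }
  return 0;
}

Running the binary with GLOG_v=2 in the environment (or --v=2 when gflags is linked in) re-enables the trace without a rebuild, avoiding the add-then-delete debug cycle this patch cleans up.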