remove debug code (PaddlePaddle#21)
* search and fill slot_feature

* search and fill slot_feature, fix compile error

* search and fill slot_feature, rename 8 as slot_num_

* remove debug code

Co-authored-by: root <[email protected]>
huwei02 and root authored Jun 8, 2022
1 parent 1816fc2 commit 8c65693
Showing 2 changed files with 0 additions and 29 deletions.
10 changes: 0 additions & 10 deletions paddle/fluid/framework/data_feed.cu
@@ -26,8 +26,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_node.h"
 #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
 
-DECLARE_int32(batch_num);
-
 namespace paddle {
 namespace framework {
 
@@ -349,15 +347,7 @@ int GraphDataGenerator::FillInsBuf() {
 }
 
-
-int times = 0;
 int GraphDataGenerator::GenerateBatch() {
-  times += 1;
-  VLOG(0) << "Begin batch " << times;
-  if (times > FLAGS_batch_num) {
-    VLOG(0) << "close batch";
-    return 0;
-  }
 
   platform::CUDADeviceGuard guard(gpuid_);
   int res = 0;
   while (ins_buf_pair_len_ < batch_size_) {
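Note: the lines deleted above implemented a temporary debug gate: a file-scope counter that force-stops batch generation once it exceeds FLAGS_batch_num. The following is a minimal standalone sketch of that pattern, rebuilt with plain gflags so it compiles outside Paddle; GenerateBatchStub and the printf logging are illustrative stand-ins, not Paddle code.

#include <cstdio>

#include <gflags/gflags.h>

// Debug-only cap on how many batches to produce before pretending the
// data source is exhausted.
DEFINE_int32(batch_num, 0, "debug-only cap on generated batches");

static int times = 0;  // file-scope call counter, as in the removed code

int GenerateBatchStub() {
  times += 1;
  std::printf("Begin batch %d\n", times);
  if (times > FLAGS_batch_num) {
    std::printf("close batch\n");
    return 0;  // the caller treats 0 as "no more batches"
  }
  // ... real batch generation would run here ...
  return 1;
}

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  while (GenerateBatchStub()) {
  }
  return 0;
}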
19 changes: 0 additions & 19 deletions paddle/fluid/platform/flags.cc
@@ -88,25 +88,6 @@ PADDLE_DEFINE_EXPORTED_bool(
     "input and output must be half precision) and recurrent neural networks "
     "(RNNs).");
 
-/**
- * CUDA related FLAG
- * Name: FLAGS_selected_gpus
- * Since Version: 1.3.0
- * Value Range: integer list separated by comma, default empty list
- * Example: FLAGS_selected_gpus=0,1,2,3,4,5,6,7 to train or predict with 0~7 gpu
- * cards
- * Note: A list of device ids separated by comma, like: 0,1,2,3
- */
-PADDLE_DEFINE_EXPORTED_int32(
-    batch_num, 0,
-    "A list of device ids separated by comma, like: 0,1,2,3. "
-    "This option is useful when doing multi process training and "
-    "each process have only one device (GPU). If you want to use "
-    "all visible devices, set this to empty string. NOTE: the "
-    "reason of doing this is that we want to use P2P communication"
-    "between GPU devices, use CUDA_VISIBLE_DEVICES can only use"
-    "share-memory only.");
-
 /**
  * CUDA related FLAG
  * Name: FLAGS_selected_gpus
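Note: the commit touches two files because the flag's storage is defined once in flags.cc (via PADDLE_DEFINE_EXPORTED_int32, which appears to wrap a gflags-style DEFINE_int32, given the matching DECLARE_int32 in data_feed.cu) and referenced elsewhere through DECLARE_int32, so dropping the debug flag means deleting both halves. A minimal two-translation-unit sketch of that pairing, using plain gflags; the file names are illustrative.

// flag_def.cc -- owns the flag's storage (plays the role of flags.cc)
#include <gflags/gflags.h>
DEFINE_int32(batch_num, 0, "debug-only cap on generated batches");

// flag_use.cc -- any other file references the same symbol
// (plays the role of data_feed.cu)
#include <gflags/gflags.h>
DECLARE_int32(batch_num);

bool ReachedBatchCap(int produced) {
  return produced > FLAGS_batch_num;  // reads the flag defined in flag_def.cc
}

Linking both objects together resolves FLAGS_batch_num to the single definition in flag_def.cc; deleting the definition without the declaration (or vice versa) would fail at link time or leave dead code, which is why both hunks land in one commit.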
