Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add xpu_wait before bkcl_all_reduce. #16

Merged
merged 2 commits into from
Oct 18, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions paddle/fluid/framework/boxps_worker.cc
Original file line number Diff line number Diff line change
Expand Up @@ -695,6 +695,9 @@ void BoxPSWorker::SyncParam(void) {
TensorScaleValue(place_, param_sync_, &param_sync_, scale);
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream));
#elif defined(PADDLE_WITH_XPU_BKCL) || defined(PADDLE_WITH_XPU)
// Other dense ops run on the default stream, so wait for their computation to finish before calling bkcl_all_reduce.
xpu_wait(0);

PADDLE_ENFORCE_EQ(
bkcl_all_reduce(comm->comm(),
sendbuff,
Expand Down
3 changes: 3 additions & 0 deletions paddle/fluid/operators/collective/c_allreduce_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -343,6 +343,9 @@ class CAllReduceOpXPUKernel : public framework::OpKernel<T> {
"Invalid reduce type: %d", red_type));
}

// Other dense ops run on the default stream, so wait for their computation to finish before calling bkcl_all_reduce.
xpu_wait(0);

PADDLE_ENFORCE_EQ(
bkcl_all_reduce(comm->comm(),
sendbuff,
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/operators/collective/c_mixallgather_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -417,6 +417,10 @@ class CMixAllGatherOpXPUKernel : public framework::OpKernel<T> {
#ifdef TRACE_PROFILE
TRACE_SCOPE_START("bkcl_all_reduce", xpu_wait(stream));
#endif

// Other dense ops run on the default stream, so wait for their computation to finish before calling bkcl_all_reduce.
xpu_wait(0);

PADDLE_ENFORCE_EQ(
bkcl_all_reduce(comm->comm(),
recvbuff,
Expand Down
16 changes: 10 additions & 6 deletions paddle/fluid/operators/data_norm_op_xpu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ template <typename DeviceContext, typename T>
class DataNormXPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const std::string& data_layout_str = context.Attr<std::string>("data_layout");
const std::string& data_layout_str = context.Attr<std::string>("data_layout");
const auto data_layout = framework::StringToDataLayout(data_layout_str);
const auto *x = context.Input<Tensor>("X");
const auto &x_dims = x->dims();
Expand All @@ -37,12 +37,12 @@ class DataNormXPUKernel : public framework::OpKernel<T> {
if (enable_scale_and_shift) {
scale_w = const_cast<float*>(context.Input<Tensor>("scale_w")->data<T>());
bias = const_cast<float*>(context.Input<Tensor>("bias")->data<T>());
}
}
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C =
(data_layout == phi::DataLayout::kNCHW ? x_dims[1]
const int C =
(data_layout == phi::DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
auto *y = context.Output<Tensor>("Y");
auto *mean_out = context.Output<Tensor>("Means");
Expand All @@ -53,8 +53,8 @@ class DataNormXPUKernel : public framework::OpKernel<T> {
const auto* batch_square_sum = context.Input<Tensor>("BatchSquareSum")->data<T>();
mean_out->mutable_data<T>(context.GetPlace());
scales->mutable_data<T>(context.GetPlace());
//means_arr = b_sum_arr / b_size_arr;
T *mean_data = mean_out->data<T>();
//means_arr = b_sum_arr / b_size_arr;
T *mean_data = mean_out->data<T>();
T *scale_data = scales->data<T>();
auto& dev_ctx = context.template device_context<DeviceContext>();
// data_norm(Context *context, const float *x, const float *batch_size,
Expand Down Expand Up @@ -122,6 +122,10 @@ class DataNormGradXPUKernel : public framework::OpKernel<T> {
auto place = ctx.GetPlace();
auto comm = platform::BKCLCommContext::Instance().Get(0, place);
auto stream = dev_ctx.x_context()->xpu_stream;

// Other dense ops run on the default stream, so wait for their computation to finish before calling bkcl_all_reduce.
xpu_wait(0);

PADDLE_ENFORCE_EQ(bkcl_all_reduce(comm->comm(), d_batch_size, d_batch_size,
C, BKCL_FLOAT, BKCL_ADD, stream),
BKCL_SUCCESS, platform::errors::PreconditionNotMet(
Expand Down