Make Workspace::Input return const reference #3452

Merged: 7 commits, merged Oct 27, 2021
Changes from 2 commits
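In short: `Workspace::Input` now returns a `const` reference, so operators read their inputs through a read-only accessor, and the few places that legitimately mutate an input switch to explicit escape hatches (`InputPtr` in the diff below, `UnsafeMutableInput` per the review thread). A minimal sketch of the API change, with simplified stand-in types rather than the actual DALI headers:

```cpp
#include <vector>

struct TensorList { std::vector<float> data; };

// Before this PR: Input() handed back a mutable reference even from a const
// method, because constness does not propagate through stored pointers.
struct WorkspaceBefore {
  std::vector<TensorList *> inputs;
  TensorList &Input(int idx) const { return *inputs[idx]; }
};

// After this PR: Input() is read-only; mutation needs an explicit opt-in.
struct WorkspaceAfter {
  std::vector<TensorList *> inputs;
  const TensorList &Input(int idx) const { return *inputs[idx]; }
};

int main() {
  TensorList tl;
  WorkspaceBefore before{{&tl}};
  before.Input(0).data.push_back(1.f);    // previously allowed
  WorkspaceAfter after{{&tl}};
  // after.Input(0).data.push_back(2.f);  // now a compile error
}
```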
2 changes: 1 addition & 1 deletion dali/operators/decoder/audio/audio_decoder_op.cc
@@ -88,7 +88,7 @@ AudioDecoderCpu::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace

for (int i = 0; i < batch_size; i++) {
auto &meta = sample_meta_[i] =
- decoders_[i]->Open({reinterpret_cast<const char *>(input[i].raw_mutable_data()),
+ decoders_[i]->Open({static_cast<const char *>(input[i].raw_data()),
input[i].shape().num_elements()});
TensorShape<> data_sample_shape = DecodedAudioShape(
meta, use_resampling_ ? target_sample_rates_[i] : -1.0f, downmix_);
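A note on the cast change above: once the code reads through `raw_data()` (a pointer-to-const) instead of `raw_mutable_data()`, a plain `static_cast` is enough, since converting `const void *` to `const char *` is an ordinary pointer conversion. A minimal sketch:

```cpp
#include <cassert>

int main() {
  const unsigned char bytes[4] = {1, 2, 3, 4};
  const void *raw = bytes;  // what a raw_data()-style accessor returns
  const char *chars = static_cast<const char *>(raw);  // void-to-object pointer: static_cast suffices
  assert(chars[0] == 1);
  return 0;
}
```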
4 changes: 2 additions & 2 deletions dali/operators/generic/join.cc
@@ -97,7 +97,7 @@ void TensorJoin<Backend, new_axis>::SetupTyped(

copy_idx_ = 0;
for (int i = 0; i < ninp; i++) {
- auto tlv = view<T>(ws.template Input<Backend>(i));
+ auto tlv = view<const T>(ws.template Input<Backend>(i));
if (new_axis || tlv.num_elements() > 0) { // when concatenating, we can skip empty inputs
if (inputs.empty())
copy_idx_ = i;
@@ -109,7 +109,7 @@ void TensorJoin<Backend, new_axis>::SetupTyped(

// No non-empty inputs? Use the first one, even if it's empty.
if (inputs.empty()) {
- inputs.push_back(view<T>(ws.template Input<Backend>(0)));
+ inputs.push_back(view<const T>(ws.template Input<Backend>(0)));
}

kernels::tensor_join::JoinedShape(output_shape, [&](int index) {
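The `view<const T>` changes above follow directly from the new const accessor: a view over a const batch can only expose pointers-to-const, so the view's element type must be `const T`. A simplified, self-contained analogue of that rule (DALI's real `view<>` helper operates on TensorList/TensorVector, not on `std::vector<char>`):

```cpp
#include <cassert>
#include <vector>

// Stand-in for a typed view: the const overload can only hand out
// a pointer-to-const, mirroring view<const T>(const TensorList&).
template <typename T>
T *view(std::vector<char> &buf) {
  return reinterpret_cast<T *>(buf.data());
}

template <typename T>
const T *view(const std::vector<char> &buf) {
  return reinterpret_cast<const T *>(buf.data());
}

int main() {
  const std::vector<char> input(16, 0);   // inputs are const after this PR
  const float *tlv = view<float>(input);  // read-only elements: fine
  // float *bad = view<float>(input);     // would not compile: discards const
  assert(tlv != nullptr);
  return 0;
}
```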
@@ -105,7 +105,7 @@ class CombineTransformsCPU : public Operator<CPUBackend> {
in_views.reserve(ws.NumInput());
for (int input_idx = 0; input_idx < ws.NumInput(); input_idx++) {
auto &in = ws.template Input<CPUBackend>(input_idx);
- in_views.push_back(view<T, 2>(in));
+ in_views.push_back(view<const T, 2>(in));
}
auto out_view = view<T, 2>(out);
auto read_mat = [](affine_mat_t<T, mat_dim> &next_mat,
@@ -124,7 +124,7 @@ class TransformBaseOp : public Operator<Backend> {
auto out_view = view<T>(out);
if (has_input_) {
auto &in = ws.template Input<Backend>(0);
- auto in_view = view<T>(in);
+ auto in_view = view<const T>(in);
for (int i = 0; i < nsamples_; i++) {
int mat_idx = num_mats == 1 ? 0 : i;
ApplyTransform(out_view[i].data, in_view[i].data, matrices[mat_idx]);
2 changes: 1 addition & 1 deletion dali/operators/numba_function/numba_func.cc
@@ -266,7 +266,7 @@ void NumbaFuncImpl<CPUBackend>::RunImpl(workspace_t<CPUBackend> &ws) {
for (size_t in_id = 0; in_id < in_types_.size(); in_id++) {
auto& in = ws.Input<CPUBackend>(in_id);
for (int i = 0; i < N; i++) {
- in_ptrs[N * in_id + i] = reinterpret_cast<uint64_t>(in[i].raw_mutable_data());
+ in_ptrs[N * in_id + i] = reinterpret_cast<uint64_t>(in[i].raw_data());
}
}

4 changes: 2 additions & 2 deletions dali/pipeline/data/dltensor.cc
@@ -44,12 +44,12 @@ void DLMTensorPtrDeleter(DLManagedTensor* dlm_tensor_ptr) {
}
}

- DLMTensorPtr MakeDLTensor(void* data, DALIDataType type,
+ DLMTensorPtr MakeDLTensor(const void* data, DALIDataType type,
bool device, int device_id,
std::unique_ptr<DLTensorResource> resource) {
DLManagedTensor *dlm_tensor_ptr = &resource->dlm_tensor;
DLTensor &dl_tensor = dlm_tensor_ptr->dl_tensor;
- dl_tensor.data = data;
+ dl_tensor.data = const_cast<void*>(data);  // This data is used only as input
dl_tensor.ndim = resource->shape.size();
dl_tensor.shape = resource->shape.begin();
if (!resource->strides.empty()) {
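The `const_cast` above is confined to the DLPack boundary: `DLTensor::data` is a plain `void *` in the DLPack C ABI, so a read-only buffer cannot be stored in it without dropping const, and the inline comment records that the tensor is only ever consumed as input. A sketch of that boundary, assuming the standard `dlpack/dlpack.h` header is available:

```cpp
#include <dlpack/dlpack.h>  // defines DLTensor, whose data member is a non-const void *

// Const is dropped only at the ABI boundary; consumers must treat dl.data as input-only.
void FillData(DLTensor &dl, const void *data) {
  dl.data = const_cast<void *>(data);
}

int main() {
  const int payload[2] = {1, 2};
  DLTensor dl{};
  FillData(dl, payload);
  return 0;
}
```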
6 changes: 3 additions & 3 deletions dali/pipeline/data/dltensor.h
@@ -43,7 +43,7 @@ struct DLTensorResource {
virtual ~DLTensorResource() = default;
};

- DLL_PUBLIC DLMTensorPtr MakeDLTensor(void *data, DALIDataType type,
+ DLL_PUBLIC DLMTensorPtr MakeDLTensor(const void *data, DALIDataType type,
bool device, int device_id,
std::unique_ptr<DLTensorResource> resource);

@@ -57,12 +57,12 @@ DLMTensorPtr GetDLTensorView(Tensor<Backend> &tensor) {
}

template <typename Backend>
- std::vector<DLMTensorPtr> GetDLTensorListView(TensorList<Backend> &tensor_list) {
+ std::vector<DLMTensorPtr> GetDLTensorListView(const TensorList<Backend> &tensor_list) {
Contributor: I think that we should pass a non-const TL here and adjust the call site to use UnsafeMutableInput.

Contributor Author: done

std::vector<DLMTensorPtr> dl_tensors{};
dl_tensors.reserve(tensor_list.num_samples());
for (size_t i = 0; i < tensor_list.num_samples(); ++i) {
const auto &shape = tensor_list.tensor_shape(i);
- dl_tensors.push_back(MakeDLTensor(tensor_list.raw_mutable_tensor(i),
+ dl_tensors.push_back(MakeDLTensor(tensor_list.raw_tensor(i),
tensor_list.type(),
std::is_same<Backend, GPUBackend>::value,
tensor_list.device_id(),
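The review exchange above was addressed in a later commit of this PR (this page shows only the first 2 of 7 commits): keep `GetDLTensorListView` taking a non-const TensorList and make the call site opt in to mutation explicitly. A hypothetical sketch of that call-site pattern; `UnsafeMutableInput` is named in the thread, but its exact signature is an assumption here:

```cpp
// Hypothetical call-site pattern (signature assumed, not shown in this diff):
//   auto &tl = ws.template UnsafeMutableInput<Backend>(idx);  // explicit opt-in to mutation
//   auto dl_tensors = GetDLTensorListView(tl);                // view built from a non-const TL
```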
2 changes: 1 addition & 1 deletion dali/pipeline/data/tensor_list.h
@@ -226,7 +226,7 @@ class DLL_PUBLIC TensorList : private Buffer<Backend> {
* shared data or the call will fail.
* Size can be set to 0 and type to NoType as intermediate step.
*/
- DLL_PUBLIC inline void ShareData(TensorList<Backend> &other) {
+ DLL_PUBLIC inline void ShareData(const TensorList<Backend> &other) {
DALI_ENFORCE(IsValidType(other.type_), "To share data, "
"the input TensorList must have a valid data type");

4 changes: 2 additions & 2 deletions dali/pipeline/data/tensor_vector.cc
@@ -320,7 +320,7 @@ void TensorVector<Backend>::Copy(const TensorVector<SrcBackend> &in_tv, cudaStream_t stream)


template <typename Backend>
- void TensorVector<Backend>::ShareData(TensorList<Backend> &in_tl) {
+ void TensorVector<Backend>::ShareData(const TensorList<Backend> &in_tl) {
SetContiguous(true);
type_ = in_tl.type_info();
pinned_ = in_tl.is_pinned();
@@ -331,7 +331,7 @@ void TensorVector<Backend>::ShareData(TensorList<Backend> &in_tl) {
}

template <typename Backend>
- void TensorVector<Backend>::ShareData(TensorVector<Backend> &tv) {
+ void TensorVector<Backend>::ShareData(const TensorVector<Backend> &tv) {
type_ = tv.type_;
state_ = tv.state_;
pinned_ = tv.is_pinned();
4 changes: 2 additions & 2 deletions dali/pipeline/data/tensor_vector.h
@@ -194,9 +194,9 @@ class DLL_PUBLIC TensorVector {
template <typename SrcBackend>
void Copy(const TensorVector<SrcBackend> &in_tv, cudaStream_t stream);

- void ShareData(TensorList<Backend> &in_tl);
+ void ShareData(const TensorList<Backend> &in_tl);

- void ShareData(TensorVector<Backend> &tv);
+ void ShareData(const TensorVector<Backend> &tv);

TensorVector<Backend> &operator=(TensorVector<Backend> &&other) noexcept;

8 changes: 4 additions & 4 deletions dali/pipeline/executor/executor.cc
@@ -301,9 +301,9 @@ void Executor<WorkspacePolicy, QueuePolicy>::RunHelper(OpNode &op_node, Workspac
for (int i = 0; i < spec.NumRegularInput(); i++) {
bool had_empty_layout = false;
if (ws.template InputIsType<CPUBackend>(i)) {
- had_empty_layout = SetDefaultLayoutIfNeeded(ws.template Input<CPUBackend>(i), schema, i);
+ had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr<CPUBackend>(i), schema, i);
} else {
- had_empty_layout = SetDefaultLayoutIfNeeded(ws.template Input<GPUBackend>(i), schema, i);
+ had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr<GPUBackend>(i), schema, i);
}
if (had_empty_layout) empty_layout_in_idxs.push_back(i);
}
@@ -334,10 +334,10 @@ void Executor<WorkspacePolicy, QueuePolicy>::RunHelper(OpNode &op_node, Workspac

for (int i : empty_layout_in_idxs) {
if (ws.template InputIsType<CPUBackend>(i)) {
- auto &in = ws.template Input<CPUBackend>(i);
+ auto &in = *ws.template InputPtr<CPUBackend>(i);
in.SetLayout({});
} else {
- auto &in = ws.template Input<GPUBackend>(i);
+ auto &in = *ws.template InputPtr<GPUBackend>(i);
in.SetLayout({});
}
}
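The executor hunks above show the other side of the trade-off: this code genuinely mutates its inputs (resetting an empty layout), so it switches from the now-const `Input` to `InputPtr`, which hands back a handle that still allows mutation. A simplified, self-contained analogue:

```cpp
#include <memory>
#include <string>

struct TensorList {
  std::string layout;
  void SetLayout(std::string l) { layout = std::move(l); }
};

struct Workspace {
  std::shared_ptr<TensorList> in;
  const TensorList &Input(int) const { return *in; }             // read-only path
  std::shared_ptr<TensorList> InputPtr(int) const { return in; } // mutable escape hatch
};

int main() {
  Workspace ws{std::make_shared<TensorList>()};
  auto &in = *ws.InputPtr(0);  // mirrors *ws.template InputPtr<Backend>(i) in the diff
  in.SetLayout({});
  return 0;
}
```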
4 changes: 2 additions & 2 deletions dali/pipeline/workspace/sample_workspace.cc
@@ -24,10 +24,10 @@ void MakeSampleView(SampleWorkspace& sample, HostWorkspace& batch, int data_idx,
int num_inputs = batch.NumInput();
for (int i = 0; i < num_inputs; i++) {
if (batch.InputIsType<CPUBackend>(i)) {
- auto &input_ref = batch.Input<CPUBackend>(i);
+ auto &input_ref = *batch.InputPtr<CPUBackend>(i);
sample.AddInput(&input_ref[data_idx]);
} else {
- auto &input_ref = batch.Input<GPUBackend>(i);
+ auto &input_ref = *batch.InputPtr<GPUBackend>(i);
sample.AddInput(&input_ref[data_idx]);
}
}
2 changes: 1 addition & 1 deletion dali/pipeline/workspace/workspace.h
@@ -142,7 +142,7 @@ class WorkspaceBase : public ArgumentWorkspace {
}

template <typename Backend>
- auto& Input(int idx) const {
+ const auto& Input(int idx) const {
return *InputHandle(idx, Backend{});
}

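The small-looking change above is the heart of the PR, and it is worth spelling out why it is needed at all: on a const member function, `auto &` still deduces a non-const reference when the value is reached through a stored pointer or handle, because constness does not propagate through indirection. Writing `const auto &` is what actually makes the accessor read-only. A minimal demonstration:

```cpp
#include <memory>

struct Data { int x = 0; };

struct Workspace {
  std::shared_ptr<Data> handle;

  // Deduces Data&: *handle is non-const even though the method is const,
  // since only the shared_ptr itself becomes const, not the pointee.
  auto &InputBefore() const { return *handle; }

  // Deduces const Data&: the explicit const in the return type is required.
  const auto &InputAfter() const { return *handle; }
};

int main() {
  Workspace ws{std::make_shared<Data>()};
  ws.InputBefore().x = 1;    // compiles: mutable access through a const method
  // ws.InputAfter().x = 2;  // compile error: assignment to const
  return 0;
}
```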
2 changes: 1 addition & 1 deletion dali/util/pybind.h
@@ -207,7 +207,7 @@ py::capsule TensorToDLPackView(Tensor<Backend> &tensor) {
}

template <typename Backend>
- py::list TensorListToDLPackView(TensorList<Backend> &tensors) {
+ py::list TensorListToDLPackView(const TensorList<Backend> &tensors) {
py::list result;
auto dl_tensors = GetDLTensorListView(tensors);
for (DLMTensorPtr &dl_tensor : dl_tensors) {