From a26d1b3d6f790e3f91e84d83a23f950b8fbbc21c Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Tue, 26 Oct 2021 16:31:28 +0200 Subject: [PATCH 1/7] Adjust constness of Input in workspace Signed-off-by: Krzysztof Lecki --- dali/operators/decoder/audio/audio_decoder_op.cc | 2 +- dali/operators/generic/join.cc | 4 ++-- .../geometry/affine_transforms/combine_transforms.cc | 2 +- .../geometry/affine_transforms/transform_base_op.h | 2 +- dali/operators/numba_function/numba_func.cc | 2 +- dali/pipeline/data/dltensor.cc | 4 ++-- dali/pipeline/data/dltensor.h | 6 +++--- dali/pipeline/data/tensor_list.h | 2 +- dali/pipeline/data/tensor_vector.cc | 4 ++-- dali/pipeline/data/tensor_vector.h | 4 ++-- dali/pipeline/executor/executor.cc | 8 ++++---- dali/pipeline/workspace/sample_workspace.cc | 4 ++-- dali/pipeline/workspace/workspace.h | 2 +- dali/util/pybind.h | 2 +- 14 files changed, 24 insertions(+), 24 deletions(-) diff --git a/dali/operators/decoder/audio/audio_decoder_op.cc b/dali/operators/decoder/audio/audio_decoder_op.cc index a624714e59..0f4d5c5a67 100644 --- a/dali/operators/decoder/audio/audio_decoder_op.cc +++ b/dali/operators/decoder/audio/audio_decoder_op.cc @@ -88,7 +88,7 @@ AudioDecoderCpu::SetupImpl(std::vector &output_desc, const workspace for (int i = 0; i < batch_size; i++) { auto &meta = sample_meta_[i] = - decoders_[i]->Open({reinterpret_cast(input[i].raw_mutable_data()), + decoders_[i]->Open({reinterpret_cast(input[i].raw_data()), input[i].shape().num_elements()}); TensorShape<> data_sample_shape = DecodedAudioShape( meta, use_resampling_ ? target_sample_rates_[i] : -1.0f, downmix_); diff --git a/dali/operators/generic/join.cc b/dali/operators/generic/join.cc index 81dd6db4ff..2c71a1601a 100644 --- a/dali/operators/generic/join.cc +++ b/dali/operators/generic/join.cc @@ -97,7 +97,7 @@ void TensorJoin::SetupTyped( copy_idx_ = 0; for (int i = 0; i < ninp; i++) { - auto tlv = view(ws.template Input(i)); + auto tlv = view(ws.template Input(i)); if (new_axis || tlv.num_elements() > 0) { // when concatenating, we can skip empty inputs if (inputs.empty()) copy_idx_ = i; @@ -109,7 +109,7 @@ void TensorJoin::SetupTyped( // No non-empty inputs? Use the first one, even if it's empty. 
if (inputs.empty()) { - inputs.push_back(view(ws.template Input(0))); + inputs.push_back(view(ws.template Input(0))); } kernels::tensor_join::JoinedShape(output_shape, [&](int index) { diff --git a/dali/operators/geometry/affine_transforms/combine_transforms.cc b/dali/operators/geometry/affine_transforms/combine_transforms.cc index 3a24aac4d1..087229801b 100644 --- a/dali/operators/geometry/affine_transforms/combine_transforms.cc +++ b/dali/operators/geometry/affine_transforms/combine_transforms.cc @@ -105,7 +105,7 @@ class CombineTransformsCPU : public Operator { in_views.reserve(ws.NumInput()); for (int input_idx = 0; input_idx < ws.NumInput(); input_idx++) { auto &in = ws.template Input(input_idx); - in_views.push_back(view(in)); + in_views.push_back(view(in)); } auto out_view = view(out); auto read_mat = [](affine_mat_t &next_mat, diff --git a/dali/operators/geometry/affine_transforms/transform_base_op.h b/dali/operators/geometry/affine_transforms/transform_base_op.h index d40ef8efe0..ec01cc0ac9 100644 --- a/dali/operators/geometry/affine_transforms/transform_base_op.h +++ b/dali/operators/geometry/affine_transforms/transform_base_op.h @@ -124,7 +124,7 @@ class TransformBaseOp : public Operator { auto out_view = view(out); if (has_input_) { auto &in = ws.template Input(0); - auto in_view = view(in); + auto in_view = view(in); for (int i = 0; i < nsamples_; i++) { int mat_idx = num_mats == 1 ? 0 : i; ApplyTransform(out_view[i].data, in_view[i].data, matrices[mat_idx]); diff --git a/dali/operators/numba_function/numba_func.cc b/dali/operators/numba_function/numba_func.cc index bb35925590..159d2d4e22 100644 --- a/dali/operators/numba_function/numba_func.cc +++ b/dali/operators/numba_function/numba_func.cc @@ -266,7 +266,7 @@ void NumbaFuncImpl::RunImpl(workspace_t &ws) { for (size_t in_id = 0; in_id < in_types_.size(); in_id++) { auto& in = ws.Input(in_id); for (int i = 0; i < N; i++) { - in_ptrs[N * in_id + i] = reinterpret_cast(in[i].raw_mutable_data()); + in_ptrs[N * in_id + i] = reinterpret_cast(in[i].raw_data()); } } diff --git a/dali/pipeline/data/dltensor.cc b/dali/pipeline/data/dltensor.cc index 46b56dbc9a..7d84048c69 100644 --- a/dali/pipeline/data/dltensor.cc +++ b/dali/pipeline/data/dltensor.cc @@ -44,12 +44,12 @@ void DLMTensorPtrDeleter(DLManagedTensor* dlm_tensor_ptr) { } } -DLMTensorPtr MakeDLTensor(void* data, DALIDataType type, +DLMTensorPtr MakeDLTensor(const void* data, DALIDataType type, bool device, int device_id, std::unique_ptr resource) { DLManagedTensor *dlm_tensor_ptr = &resource->dlm_tensor; DLTensor &dl_tensor = dlm_tensor_ptr->dl_tensor; - dl_tensor.data = data; + dl_tensor.data = const_cast(data); // This data is used only as input dl_tensor.ndim = resource->shape.size(); dl_tensor.shape = resource->shape.begin(); if (!resource->strides.empty()) { diff --git a/dali/pipeline/data/dltensor.h b/dali/pipeline/data/dltensor.h index b75cec6e39..1a7619cc11 100644 --- a/dali/pipeline/data/dltensor.h +++ b/dali/pipeline/data/dltensor.h @@ -43,7 +43,7 @@ struct DLTensorResource { virtual ~DLTensorResource() = default; }; -DLL_PUBLIC DLMTensorPtr MakeDLTensor(void *data, DALIDataType type, +DLL_PUBLIC DLMTensorPtr MakeDLTensor(const void *data, DALIDataType type, bool device, int device_id, std::unique_ptr resource); @@ -57,12 +57,12 @@ DLMTensorPtr GetDLTensorView(Tensor &tensor) { } template -std::vector GetDLTensorListView(TensorList &tensor_list) { +std::vector GetDLTensorListView(const TensorList &tensor_list) { std::vector dl_tensors{}; 
dl_tensors.reserve(tensor_list.num_samples()); for (size_t i = 0; i < tensor_list.num_samples(); ++i) { const auto &shape = tensor_list.tensor_shape(i); - dl_tensors.push_back(MakeDLTensor(tensor_list.raw_mutable_tensor(i), + dl_tensors.push_back(MakeDLTensor(tensor_list.raw_tensor(i), tensor_list.type(), std::is_same::value, tensor_list.device_id(), diff --git a/dali/pipeline/data/tensor_list.h b/dali/pipeline/data/tensor_list.h index ff7863bcb8..bf0d0ad907 100644 --- a/dali/pipeline/data/tensor_list.h +++ b/dali/pipeline/data/tensor_list.h @@ -226,7 +226,7 @@ class DLL_PUBLIC TensorList : private Buffer { * shared data or the call will fail. * Size can be set to 0 and type to NoType as intermediate step. */ - DLL_PUBLIC inline void ShareData(TensorList &other) { + DLL_PUBLIC inline void ShareData(const TensorList &other) { DALI_ENFORCE(IsValidType(other.type_), "To share data, " "the input TensorList must have a valid data type"); diff --git a/dali/pipeline/data/tensor_vector.cc b/dali/pipeline/data/tensor_vector.cc index 42dd5d8274..705b503cf3 100644 --- a/dali/pipeline/data/tensor_vector.cc +++ b/dali/pipeline/data/tensor_vector.cc @@ -320,7 +320,7 @@ void TensorVector::Copy(const TensorVector &in_tv, cudaStre template -void TensorVector::ShareData(TensorList &in_tl) { +void TensorVector::ShareData(const TensorList &in_tl) { SetContiguous(true); type_ = in_tl.type_info(); pinned_ = in_tl.is_pinned(); @@ -331,7 +331,7 @@ void TensorVector::ShareData(TensorList &in_tl) { } template -void TensorVector::ShareData(TensorVector &tv) { +void TensorVector::ShareData(const TensorVector &tv) { type_ = tv.type_; state_ = tv.state_; pinned_ = tv.is_pinned(); diff --git a/dali/pipeline/data/tensor_vector.h b/dali/pipeline/data/tensor_vector.h index ac6001f888..c6c5626861 100644 --- a/dali/pipeline/data/tensor_vector.h +++ b/dali/pipeline/data/tensor_vector.h @@ -194,9 +194,9 @@ class DLL_PUBLIC TensorVector { template void Copy(const TensorVector &in_tv, cudaStream_t stream); - void ShareData(TensorList &in_tl); + void ShareData(const TensorList &in_tl); - void ShareData(TensorVector &tv); + void ShareData(const TensorVector &tv); TensorVector &operator=(TensorVector &&other) noexcept; diff --git a/dali/pipeline/executor/executor.cc b/dali/pipeline/executor/executor.cc index 6203c524dd..3ecb502335 100644 --- a/dali/pipeline/executor/executor.cc +++ b/dali/pipeline/executor/executor.cc @@ -301,9 +301,9 @@ void Executor::RunHelper(OpNode &op_node, Workspac for (int i = 0; i < spec.NumRegularInput(); i++) { bool had_empty_layout = false; if (ws.template InputIsType(i)) { - had_empty_layout = SetDefaultLayoutIfNeeded(ws.template Input(i), schema, i); + had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr(i), schema, i); } else { - had_empty_layout = SetDefaultLayoutIfNeeded(ws.template Input(i), schema, i); + had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr(i), schema, i); } if (had_empty_layout) empty_layout_in_idxs.push_back(i); } @@ -334,10 +334,10 @@ void Executor::RunHelper(OpNode &op_node, Workspac for (int i : empty_layout_in_idxs) { if (ws.template InputIsType(i)) { - auto &in = ws.template Input(i); + auto &in = *ws.template InputPtr(i); in.SetLayout({}); } else { - auto &in = ws.template Input(i); + auto &in = *ws.template InputPtr(i); in.SetLayout({}); } } diff --git a/dali/pipeline/workspace/sample_workspace.cc b/dali/pipeline/workspace/sample_workspace.cc index eaae1ba117..d028caba42 100644 --- a/dali/pipeline/workspace/sample_workspace.cc +++ 
b/dali/pipeline/workspace/sample_workspace.cc @@ -24,10 +24,10 @@ void MakeSampleView(SampleWorkspace& sample, HostWorkspace& batch, int data_idx, int num_inputs = batch.NumInput(); for (int i = 0; i < num_inputs; i++) { if (batch.InputIsType(i)) { - auto &input_ref = batch.Input(i); + auto &input_ref = *batch.InputPtr(i); sample.AddInput(&input_ref[data_idx]); } else { - auto &input_ref = batch.Input(i); + auto &input_ref = *batch.InputPtr(i); sample.AddInput(&input_ref[data_idx]); } } diff --git a/dali/pipeline/workspace/workspace.h b/dali/pipeline/workspace/workspace.h index 394eebc1e1..18f93f764f 100644 --- a/dali/pipeline/workspace/workspace.h +++ b/dali/pipeline/workspace/workspace.h @@ -142,7 +142,7 @@ class WorkspaceBase : public ArgumentWorkspace { } template - auto& Input(int idx) const { + const auto& Input(int idx) const { return *InputHandle(idx, Backend{}); } diff --git a/dali/util/pybind.h b/dali/util/pybind.h index 72fcc7e7e8..14cff49f38 100644 --- a/dali/util/pybind.h +++ b/dali/util/pybind.h @@ -207,7 +207,7 @@ py::capsule TensorToDLPackView(Tensor &tensor) { } template -py::list TensorListToDLPackView(TensorList &tensors) { +py::list TensorListToDLPackView(const TensorList &tensors) { py::list result; auto dl_tensors = GetDLTensorListView(tensors); for (DLMTensorPtr &dl_tensor : dl_tensors) { From 2c96bb6165a7015ce43e4d3a4b706251bff2620b Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Tue, 26 Oct 2021 17:34:12 +0200 Subject: [PATCH 2/7] Cast fix Signed-off-by: Krzysztof Lecki --- dali/operators/decoder/audio/audio_decoder_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dali/operators/decoder/audio/audio_decoder_op.cc b/dali/operators/decoder/audio/audio_decoder_op.cc index 0f4d5c5a67..1e07d6fedf 100644 --- a/dali/operators/decoder/audio/audio_decoder_op.cc +++ b/dali/operators/decoder/audio/audio_decoder_op.cc @@ -88,7 +88,7 @@ AudioDecoderCpu::SetupImpl(std::vector &output_desc, const workspace for (int i = 0; i < batch_size; i++) { auto &meta = sample_meta_[i] = - decoders_[i]->Open({reinterpret_cast(input[i].raw_data()), + decoders_[i]->Open({static_cast(input[i].raw_data()), input[i].shape().num_elements()}); TensorShape<> data_sample_shape = DecodedAudioShape( meta, use_resampling_ ? 
target_sample_rates_[i] : -1.0f, downmix_); From cf792b06e69a7e5d194d2cd7dd802854669707ff Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Tue, 26 Oct 2021 17:55:09 +0200 Subject: [PATCH 3/7] Move const cast by one level Signed-off-by: Krzysztof Lecki --- dali/pipeline/data/dltensor.cc | 4 ++-- dali/pipeline/data/dltensor.h | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/dali/pipeline/data/dltensor.cc b/dali/pipeline/data/dltensor.cc index 7d84048c69..46b56dbc9a 100644 --- a/dali/pipeline/data/dltensor.cc +++ b/dali/pipeline/data/dltensor.cc @@ -44,12 +44,12 @@ void DLMTensorPtrDeleter(DLManagedTensor* dlm_tensor_ptr) { } } -DLMTensorPtr MakeDLTensor(const void* data, DALIDataType type, +DLMTensorPtr MakeDLTensor(void* data, DALIDataType type, bool device, int device_id, std::unique_ptr resource) { DLManagedTensor *dlm_tensor_ptr = &resource->dlm_tensor; DLTensor &dl_tensor = dlm_tensor_ptr->dl_tensor; - dl_tensor.data = const_cast(data); // This data is used only as input + dl_tensor.data = data; dl_tensor.ndim = resource->shape.size(); dl_tensor.shape = resource->shape.begin(); if (!resource->strides.empty()) { diff --git a/dali/pipeline/data/dltensor.h b/dali/pipeline/data/dltensor.h index 1a7619cc11..d14147c4b5 100644 --- a/dali/pipeline/data/dltensor.h +++ b/dali/pipeline/data/dltensor.h @@ -62,7 +62,8 @@ std::vector GetDLTensorListView(const TensorList &tensor_ dl_tensors.reserve(tensor_list.num_samples()); for (size_t i = 0; i < tensor_list.num_samples(); ++i) { const auto &shape = tensor_list.tensor_shape(i); - dl_tensors.push_back(MakeDLTensor(tensor_list.raw_tensor(i), + // The data is intended to be used only as input + dl_tensors.push_back(MakeDLTensor(const_cast(tensor_list.raw_tensor(i)), tensor_list.type(), std::is_same::value, tensor_list.device_id(), From 6983c69f393bd50c25c285124e90fc18d265d708 Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Tue, 26 Oct 2021 17:56:24 +0200 Subject: [PATCH 4/7] comment Signed-off-by: Krzysztof Lecki --- dali/util/pybind.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dali/util/pybind.h b/dali/util/pybind.h index 14cff49f38..170fcd6021 100644 --- a/dali/util/pybind.h +++ b/dali/util/pybind.h @@ -206,6 +206,11 @@ py::capsule TensorToDLPackView(Tensor &tensor) { return DLTensorToCapsule(std::move(dl_tensor)); } +/** + * @brief Convert the TensorList to python list of tensors. 
+ * + * The returned list is intended to be read-only + */ template py::list TensorListToDLPackView(const TensorList &tensors) { py::list result; From 4d698547f25357b6c18ff65a624362ac83dd35e9 Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Tue, 26 Oct 2021 18:22:35 +0200 Subject: [PATCH 5/7] Fixup Signed-off-by: Krzysztof Lecki --- dali/pipeline/data/dltensor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dali/pipeline/data/dltensor.h b/dali/pipeline/data/dltensor.h index d14147c4b5..43d6c66a53 100644 --- a/dali/pipeline/data/dltensor.h +++ b/dali/pipeline/data/dltensor.h @@ -43,7 +43,7 @@ struct DLTensorResource { virtual ~DLTensorResource() = default; }; -DLL_PUBLIC DLMTensorPtr MakeDLTensor(const void *data, DALIDataType type, +DLL_PUBLIC DLMTensorPtr MakeDLTensor(void *data, DALIDataType type, bool device, int device_id, std::unique_ptr resource); From 77e3c2851c5d06bd51453d2e4f222c0427bc7228 Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Wed, 27 Oct 2021 11:17:17 +0200 Subject: [PATCH 6/7] Add docs and UnsafeMutableInput Signed-off-by: Krzysztof Lecki --- dali/pipeline/executor/executor.cc | 10 +-- dali/pipeline/workspace/sample_workspace.cc | 4 +- dali/pipeline/workspace/workspace.h | 67 ++++++++++++++++++--- 3 files changed, 65 insertions(+), 16 deletions(-) diff --git a/dali/pipeline/executor/executor.cc b/dali/pipeline/executor/executor.cc index 3ecb502335..024026cf34 100644 --- a/dali/pipeline/executor/executor.cc +++ b/dali/pipeline/executor/executor.cc @@ -301,9 +301,11 @@ void Executor::RunHelper(OpNode &op_node, Workspac for (int i = 0; i < spec.NumRegularInput(); i++) { bool had_empty_layout = false; if (ws.template InputIsType(i)) { - had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr(i), schema, i); + had_empty_layout = + SetDefaultLayoutIfNeeded(ws.template UnsafeMutableInput(i), schema, i); } else { - had_empty_layout = SetDefaultLayoutIfNeeded(*ws.template InputPtr(i), schema, i); + had_empty_layout = + SetDefaultLayoutIfNeeded(ws.template UnsafeMutableInput(i), schema, i); } if (had_empty_layout) empty_layout_in_idxs.push_back(i); } @@ -334,10 +336,10 @@ void Executor::RunHelper(OpNode &op_node, Workspac for (int i : empty_layout_in_idxs) { if (ws.template InputIsType(i)) { - auto &in = *ws.template InputPtr(i); + auto &in = ws.template UnsafeMutableInput(i); in.SetLayout({}); } else { - auto &in = *ws.template InputPtr(i); + auto &in = ws.template UnsafeMutableInput(i); in.SetLayout({}); } } diff --git a/dali/pipeline/workspace/sample_workspace.cc b/dali/pipeline/workspace/sample_workspace.cc index d028caba42..7e75bc7c72 100644 --- a/dali/pipeline/workspace/sample_workspace.cc +++ b/dali/pipeline/workspace/sample_workspace.cc @@ -24,10 +24,10 @@ void MakeSampleView(SampleWorkspace& sample, HostWorkspace& batch, int data_idx, int num_inputs = batch.NumInput(); for (int i = 0; i < num_inputs; i++) { if (batch.InputIsType(i)) { - auto &input_ref = *batch.InputPtr(i); + auto &input_ref = batch.UnsafeMutableInput(i); sample.AddInput(&input_ref[data_idx]); } else { - auto &input_ref = *batch.InputPtr(i); + auto &input_ref = batch.UnsafeMutableInput(i); sample.AddInput(&input_ref[data_idx]); } } diff --git a/dali/pipeline/workspace/workspace.h b/dali/pipeline/workspace/workspace.h index 18f93f764f..a6dce9fae0 100644 --- a/dali/pipeline/workspace/workspace.h +++ b/dali/pipeline/workspace/workspace.h @@ -141,26 +141,32 @@ class WorkspaceBase : public ArgumentWorkspace { gpu_outputs_index_.clear(); } + /** @defgroup InputOutput 
Input and output APIs + * Functions used to access inputs and outputs of the operator in its implementation. + * The inputs are read-only while outputs can be modified. + * @{ + */ + + /** + * @brief Returns the const reference to the input batch at the position `idx`. + * + * The operator implementation can use this function to access its inputs. + */ template const auto& Input(int idx) const { return *InputHandle(idx, Backend{}); } + /** + * @brief Returns the mutable reference to the output batch at the position `idx`. + * + * The operator implementation can use this function to access its outputs. + */ template auto& Output(int idx) const { return *OutputHandle(idx, Backend{}); } - template - const InputType& InputPtr(int idx) const { - return InputHandle(idx, Backend{}); - } - - template - const OutputType& OutputPtr(int idx) const { - return OutputHandle(idx, Backend{}); - } - /** * @brief Returns the number of inputs. */ @@ -175,6 +181,47 @@ class WorkspaceBase : public ArgumentWorkspace { return output_index_map_.size(); } + + /** @} */ // end of InputOutput + + /** @defgroup InputOutputInternal Internal API for input and output access + * Functions allowing mutable access to both inputs and outputs that should not be used in + * operator implementation. + * @{ + */ + + /** + * @brief Returns the mutable reference to the input batch at the position `idx`. + * + * Intended only for executor and other internal APIs. + */ + template + auto& UnsafeMutableInput(int idx) const { + return *InputHandle(idx, Backend{}); + } + + /** + * @brief Returns the underlying handle to the input batch at the position `idx`. + * + * Intended only for executor and other internal APIs. + */ + template + const InputType& InputPtr(int idx) const { + return InputHandle(idx, Backend{}); + } + + /** + * @brief Returns the underlying handle to the output batch at the position `idx`. + * + * Intended only for executor and other internal APIs. 
+ */ + template + const OutputType& OutputPtr(int idx) const { + return OutputHandle(idx, Backend{}); + } + + /** @} */ // end of InputOutputInternal + /** * Returns shape of input at given index * @return TensorShape<> for SampleWorkspace, TensorListShape<> for other Workspaces From 608455031bb016727c8034bdd184eb98abd7414a Mon Sep 17 00:00:00 2001 From: Krzysztof Lecki Date: Wed, 27 Oct 2021 11:33:13 +0200 Subject: [PATCH 7/7] Allow DLPack mutable access Signed-off-by: Krzysztof Lecki --- dali/operators/python_function/dltensor_function.cc | 12 ++++++------ dali/pipeline/data/dltensor.h | 5 ++--- dali/util/pybind.h | 7 +------ 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/dali/operators/python_function/dltensor_function.cc b/dali/operators/python_function/dltensor_function.cc index 3674357d60..0adab7da4f 100644 --- a/dali/operators/python_function/dltensor_function.cc +++ b/dali/operators/python_function/dltensor_function.cc @@ -78,8 +78,8 @@ py::list PrepareDLTensorInputs(HostWorkspace &ws) { for (Index idx = 0; idx < ws.NumInput(); ++idx) { py::list dl_tensor_list; for (Index i = 0; i < ws.GetInputBatchSize(idx); ++i) { - auto &t = ws.Input(idx)[i]; - auto dl_capsule = TensorToDLPackView(const_cast&>(t)); + auto &t = ws.UnsafeMutableInput(idx)[i]; + auto dl_capsule = TensorToDLPackView(t); dl_tensor_list.append(dl_capsule); } input_tuple.append(dl_tensor_list); @@ -91,7 +91,7 @@ template <> py::list PrepareDLTensorInputs(DeviceWorkspace &ws) { py::list input_tuple; for (Index idx = 0; idx < ws.NumInput(); ++idx) { - auto &tlist = ws.Input(idx); + auto &tlist = ws.UnsafeMutableInput(idx); py::list dl_tensor_list = TensorListToDLPackView(tlist); input_tuple.append(dl_tensor_list); } @@ -106,8 +106,8 @@ py::list PrepareDLTensorInputsPerSample(HostWorkspace &ws) { for (Index s = 0; s < batch_size; ++s) { py::list tuple; for (Index idx = 0; idx < ws.NumInput(); ++idx) { - auto &t = ws.Input(idx)[s]; - auto dl_capsule = TensorToDLPackView(const_cast&>(t)); + auto &t = ws.UnsafeMutableInput(idx)[s]; + auto dl_capsule = TensorToDLPackView(t); tuple.append(dl_capsule); } input_tuples.append(tuple); @@ -122,7 +122,7 @@ py::list PrepareDLTensorInputsPerSample(DeviceWorkspace &ws) { Index batch_size = ws.Input(0).num_samples(); input_tuples.resize(batch_size); for (Index idx = 0; idx < ws.NumInput(); ++idx) { - py::list dl_tensor_list = TensorListToDLPackView(ws.Input(idx)); + py::list dl_tensor_list = TensorListToDLPackView(ws.UnsafeMutableInput(idx)); for (Index s = 0; s < batch_size; ++s) { input_tuples[s].append(dl_tensor_list[s]); } diff --git a/dali/pipeline/data/dltensor.h b/dali/pipeline/data/dltensor.h index 43d6c66a53..b75cec6e39 100644 --- a/dali/pipeline/data/dltensor.h +++ b/dali/pipeline/data/dltensor.h @@ -57,13 +57,12 @@ DLMTensorPtr GetDLTensorView(Tensor &tensor) { } template -std::vector GetDLTensorListView(const TensorList &tensor_list) { +std::vector GetDLTensorListView(TensorList &tensor_list) { std::vector dl_tensors{}; dl_tensors.reserve(tensor_list.num_samples()); for (size_t i = 0; i < tensor_list.num_samples(); ++i) { const auto &shape = tensor_list.tensor_shape(i); - // The data is intended to be used only as input - dl_tensors.push_back(MakeDLTensor(const_cast(tensor_list.raw_tensor(i)), + dl_tensors.push_back(MakeDLTensor(tensor_list.raw_mutable_tensor(i), tensor_list.type(), std::is_same::value, tensor_list.device_id(), diff --git a/dali/util/pybind.h b/dali/util/pybind.h index 170fcd6021..72fcc7e7e8 100644 --- a/dali/util/pybind.h +++ 
b/dali/util/pybind.h @@ -206,13 +206,8 @@ py::capsule TensorToDLPackView(Tensor &tensor) { return DLTensorToCapsule(std::move(dl_tensor)); } -/** - * @brief Convert the TensorList to python list of tensors. - * - * The returned list is intended to be read-only - */ template -py::list TensorListToDLPackView(const TensorList &tensors) { +py::list TensorListToDLPackView(TensorList &tensors) { py::list result; auto dl_tensors = GetDLTensorListView(tensors); for (DLMTensorPtr &dl_tensor : dl_tensors) {
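
Note: the sketch below summarizes the access pattern this series converges on. It uses simplified stand-in types (Batch, Workspace), not the real DALI classes, and only illustrates the split between the read-only Input() accessor and the internal UnsafeMutableInput() escape hatch that the executor and the DLPack export path rely on after patch 7.

    // Minimal, self-contained sketch of the access pattern introduced by this series.
    // Batch and Workspace are simplified analogues of TensorList/TensorVector and
    // WorkspaceBase; they are assumptions for illustration, not DALI's actual API.
    #include <cassert>
    #include <utility>
    #include <vector>

    struct Batch {                                   // stand-in for a batch of tensors
      std::vector<float> data;
      const void *raw_data() const { return data.data(); }   // read-only view
      void *raw_mutable_data() { return data.data(); }        // mutable view
    };

    class Workspace {                                // stand-in for WorkspaceBase
     public:
      explicit Workspace(std::vector<Batch> inputs) : inputs_(std::move(inputs)) {}

      // Operator implementations read their inputs through a const reference.
      const Batch &Input(int idx) const { return inputs_[idx]; }

      // The executor and other internal code (layout fix-ups, DLPack export) may
      // still need mutable access; the name makes that escape hatch explicit.
      Batch &UnsafeMutableInput(int idx) const { return inputs_[idx]; }

     private:
      mutable std::vector<Batch> inputs_;
    };

    // DLPack's DLTensor stores a non-const void* even for read-only consumers,
    // which is why the final patch keeps MakeDLTensor taking void* and obtains
    // the pointer through the mutable accessor instead of const_cast-ing a
    // const input (the approach tried and reverted in patches 3-5).
    void *ExportForDLPack(Workspace &ws, int idx) {
      return ws.UnsafeMutableInput(idx).raw_mutable_data();
    }

    int main() {
      Workspace ws({Batch{{1.f, 2.f, 3.f}}});
      const Batch &in = ws.Input(0);                 // read-only access in an operator
      assert(in.raw_data() != nullptr);
      assert(ExportForDLPack(ws, 0) != nullptr);     // mutable access at the DLPack boundary
      return 0;
    }

The design choice mirrored here: rather than hiding a const_cast inside the DLPack helpers, mutable intent is made visible at the call site through UnsafeMutableInput, while ordinary operator code is restricted to the const Input() accessor.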