From 95d7b8ba71950cd04c9c8b6c67f1aa11b9b5238a Mon Sep 17 00:00:00 2001
From: Joaquin Anton
Date: Tue, 16 Feb 2021 14:04:31 +0100
Subject: [PATCH] More changes

Signed-off-by: Joaquin Anton
---
 dali/python/nvidia/dali/plugin/base_iterator.py |  2 +-
 dali/python/nvidia/dali/plugin/mxnet.py         |  6 +++---
 dali/python/nvidia/dali/plugin/paddle.py        |  4 ++--
 dali/python/nvidia/dali/plugin/pytorch.py       |  4 ++--
 dali/python/nvidia/dali/types.py                |  4 ++--
 dali/test/python/test_torch_pipeline_rnnt.py    |  2 +-
 docs/advanced_topics_performance_tuning.rst     |  2 +-
 docs/examples/getting started.ipynb             |  6 +++---
 docs/math.rst                                   |  4 +---
 docs/pipeline.rst                               | 13 +++++--------
 docs/supported_ops.rst                          |  4 +++-
 11 files changed, 24 insertions(+), 27 deletions(-)

diff --git a/dali/python/nvidia/dali/plugin/base_iterator.py b/dali/python/nvidia/dali/plugin/base_iterator.py
index af7ff261cb..29b47e874d 100644
--- a/dali/python/nvidia/dali/plugin/base_iterator.py
+++ b/dali/python/nvidia/dali/plugin/base_iterator.py
@@ -48,7 +48,7 @@ class _DaliBaseIterator(object):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     output_map : list of (str, str)
                 List of pairs (output_name, tag) which maps consecutive
diff --git a/dali/python/nvidia/dali/plugin/mxnet.py b/dali/python/nvidia/dali/plugin/mxnet.py
index 82804f4f91..71e8793383 100644
--- a/dali/python/nvidia/dali/plugin/mxnet.py
+++ b/dali/python/nvidia/dali/plugin/mxnet.py
@@ -130,7 +130,7 @@ class DALIGenericIterator(_DALIMXNetIteratorBase):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     output_map : list of (str, str)
                 List of pairs (output_name, tag) which maps consecutive
@@ -396,7 +396,7 @@ class DALIClassificationIterator(DALIGenericIterator):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     size : int, default = -1
                 Number of samples in the shard for the wrapped pipeline (if there is more than one it is a sum)
@@ -537,7 +537,7 @@ class DALIGluonIterator(_DALIMXNetIteratorBase):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     size : int, default = -1
                 Number of samples in the shard for the wrapped pipeline (if there is more than one it is a sum)
diff --git a/dali/python/nvidia/dali/plugin/paddle.py b/dali/python/nvidia/dali/plugin/paddle.py
index 99e0408419..9dbe727272 100644
--- a/dali/python/nvidia/dali/plugin/paddle.py
+++ b/dali/python/nvidia/dali/plugin/paddle.py
@@ -141,7 +141,7 @@ class DALIGenericIterator(_DaliBaseIterator):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     output_map : list of str or pair of type (str, int)
                 The strings maps consecutive outputs of DALI pipelines to
@@ -385,7 +385,7 @@ class DALIClassificationIterator(DALIGenericIterator):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     size : int, default = -1
                 Number of samples in the shard for the wrapped pipeline (if there is more than one it is a sum)
diff --git a/dali/python/nvidia/dali/plugin/pytorch.py b/dali/python/nvidia/dali/plugin/pytorch.py
index ed691a03f1..70afe772b4 100644
--- a/dali/python/nvidia/dali/plugin/pytorch.py
+++ b/dali/python/nvidia/dali/plugin/pytorch.py
@@ -75,7 +75,7 @@ class DALIGenericIterator(_DaliBaseIterator):
 
     Parameters
     ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     output_map : list of str
                 List of strings which maps consecutive outputs
@@ -295,7 +295,7 @@ class DALIClassificationIterator(DALIGenericIterator):
 
     Parameters
    ----------
-    pipelines : list of nvidia.dali.pipeline.Pipeline
+    pipelines : list of nvidia.dali.Pipeline
                 List of pipelines to use
     size : int, default = -1
                 Number of samples in the shard for the wrapped pipeline (if there is more than one it is a sum)
diff --git a/dali/python/nvidia/dali/types.py b/dali/python/nvidia/dali/types.py
index 9fc60ec766..54508855d6 100644
--- a/dali/python/nvidia/dali/types.py
+++ b/dali/python/nvidia/dali/types.py
@@ -141,7 +141,7 @@ class ScalarConstant(object):
 
     Wrapper for a constant value that can be used in DALI :ref:`mathematical expressions`
     and applied element-wise to the results of DALI Operators representing Tensors in
-    :meth:`nvidia.dali.pipeline.Pipeline.define_graph` step.
+    :meth:`nvidia.dali.Pipeline.define_graph` step.
 
     ScalarConstant indicates what type should the value be treated as with respect
     to type promotions. The actual values passed to the backend from python
@@ -445,7 +445,7 @@ def _is_scalar_value(value):
 
 def Constant(value, dtype = None, shape = None, layout = None, device = None, **kwargs):
     """Wraps a constant value which can then be used in
-:meth:`nvidia.dali.pipeline.Pipeline.define_graph` pipeline definition step.
+:meth:`nvidia.dali.Pipeline.define_graph` pipeline definition step.
 
 If the `value` argument is a scalar and neither `shape`, `layout` nor
 `device` is provided, the function will return a :class:`ScalarConstant`
diff --git a/dali/test/python/test_torch_pipeline_rnnt.py b/dali/test/python/test_torch_pipeline_rnnt.py
index f9b8e4541d..24600f2da8 100644
--- a/dali/test/python/test_torch_pipeline_rnnt.py
+++ b/dali/test/python/test_torch_pipeline_rnnt.py
@@ -159,7 +159,7 @@ def forward(self, inp, seq_len):
         return x.to(dtype)
 
 
-class RnntTrainPipeline(nvidia.dali.pipeline.Pipeline):
+class RnntTrainPipeline(nvidia.dali.Pipeline):
     def __init__(self,
                  device_id,
                  n_devices,
diff --git a/docs/advanced_topics_performance_tuning.rst b/docs/advanced_topics_performance_tuning.rst
index f121444551..e99007ea73 100644
--- a/docs/advanced_topics_performance_tuning.rst
+++ b/docs/advanced_topics_performance_tuning.rst
@@ -106,7 +106,7 @@ To determine the amount of memory output that each operator needs, complete the
 
 1) Create the pipeline by setting ``enable_memory_stats`` to True.
 2) Query the pipeline for the operator's output memory statistics by calling the
-   :meth:`nvidia.dali.pipeline.Pipeline.executor_statistics` method on the pipeline.
+   :meth:`nvidia.dali.Pipeline.executor_statistics` method on the pipeline.

 The ``max_real_memory_size`` value represents the biggest tensor in the batch for the outputs that
 allocate memory per sample and not for the entire batch at the time or the average tensor size when
diff --git a/docs/examples/getting started.ipynb b/docs/examples/getting started.ipynb
index 6149b594c6..1b5f0b8cb2 100644
--- a/docs/examples/getting started.ipynb
+++ b/docs/examples/getting started.ipynb
@@ -81,10 +81,10 @@
     " | in faster execution speed, but larger memory consumption.\n",
     " | `exec_async` : bool, optional, default = True\n",
     " | Whether to execute the pipeline asynchronously.\n",
-    " | This makes :meth:`nvidia.dali.pipeline.Pipeline.run` method\n",
+    " | This makes :meth:`nvidia.dali.Pipeline.run` method\n",
     " | run asynchronously with respect to the calling Python thread.\n",
     " | In order to synchronize with the pipeline one needs to call\n",
-    " | :meth:`nvidia.dali.pipeline.Pipeline.outputs` method.\n",
+    " | :meth:`nvidia.dali.Pipeline.outputs` method.\n",
     " | `bytes_per_sample` : int, optional, default = 0\n",
     " | A hint for DALI for how much memory to use for its tensors.\n",
     " | `set_affinity` : bool, optional, default = False\n",
@@ -106,7 +106,7 @@
     " | and `y` for mixed and gpu stages. It is not supported when both `exec_async`\n",
     " | and `exec_pipelined` are set to `False`.\n",
     " | Executor will buffer cpu and gpu stages separatelly,\n",
-    " | and will fill the buffer queues when the first :meth:`nvidia.dali.pipeline.Pipeline.run`\n",
+    " | and will fill the buffer queues when the first :meth:`nvidia.dali.Pipeline.run`\n",
     " | is issued.\n",
     " | \n",
     " | Methods defined here:\n",
diff --git a/docs/math.rst b/docs/math.rst
index d02c0fe975..6987247ebe 100644
--- a/docs/math.rst
+++ b/docs/math.rst
@@ -4,11 +4,9 @@ Mathematical Expressions
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
 DALI allows you to use regular Python arithmetic operations and other mathematical functions in
-the :meth:`~nvidia.dali.pipeline.Pipeline.define_graph` method on the values that are returned
+the :meth:`~nvidia.dali.Pipeline.define_graph` method on the values that are returned
 from invoking other operators.
 
-Same expressions can be used with :ref:`functional api`.
-
 The expressions that are used will be incorporated into the pipeline without needing to explicitly
 instantiate operators and will describe the element-wise operations on Tensors.
 
diff --git a/docs/pipeline.rst b/docs/pipeline.rst
index a4a7320da7..0776b5e46b 100644
--- a/docs/pipeline.rst
+++ b/docs/pipeline.rst
@@ -3,20 +3,17 @@
 Pipeline
 ========
 
+.. currentmodule:: nvidia.dali
+
 In DALI, any data processing task has a central object called Pipeline. Pipeline object is an
 instance of :class:`nvidia.dali.Pipeline` or a derived class. Pipeline encapsulates the data
 processing graph and the execution engine.
 
 You can define a DALI Pipeline in the following ways:
 
-#. by implementing a function that uses DALI operators inside and decorating it with the
-:meth:`pipeline_def` decorator
-#. by instantiating :class:`Pipeline` object directly, building the graph and setting the pipeline
-outputs with :meth:`Pipeline.set_outputs`
-#. by inheriting from :class:`Pipeline` class and overriding :meth:`Pipeline.define_graph`
-(this is the legacy way of defining DALI Pipelines)
-
-.. currentmodule:: nvidia.dali
+#. By implementing a function that uses DALI operators inside and decorating it with the :meth:`pipeline_def` decorator.
+#. By instantiating a :class:`Pipeline` object directly, building the graph and setting the pipeline outputs with :meth:`Pipeline.set_outputs`.
+#. By inheriting from the :class:`Pipeline` class and overriding :meth:`Pipeline.define_graph` (this is the legacy way of defining DALI Pipelines).
 
 .. autoclass:: Pipeline
    :members:
diff --git a/docs/supported_ops.rst b/docs/supported_ops.rst
index ea13c1ce7c..ee0c24f40b 100644
--- a/docs/supported_ops.rst
+++ b/docs/supported_ops.rst
@@ -3,7 +3,9 @@ Operations
 
 .. currentmodule:: nvidia.dali
 
-Operations can be used to define the data processing graphs within a DALI :ref:`Pipeline `.
+Operation functions are used to define the data processing graph within a DALI :ref:`Pipeline `.
+They accept and return :class:`~nvidia.dali.pipeline.DataNode` instances, which represent batches of Tensors.
+Note that these operation functions cannot be used to process data directly.
 
 The following table lists all available operations available in DALI:
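
Below is a minimal sketch of the `pipeline_def` definition style that the updated docs/pipeline.rst and docs/supported_ops.rst text above describes. It is illustrative only and not part of the patch; the data directory, operator choices, and batch settings are assumptions.

    # Minimal sketch of a pipeline defined with the @pipeline_def decorator.
    # Assumptions (not from this patch): JPEG files under "data/images" arranged
    # in per-class subdirectories, a GPU visible as device 0, arbitrary batch settings.
    from nvidia.dali import pipeline_def
    import nvidia.dali.fn as fn

    @pipeline_def(batch_size=8, num_threads=2, device_id=0)
    def example_pipeline():
        # Each fn.* call returns a DataNode describing a node of the graph;
        # no data is processed at definition time.
        jpegs, labels = fn.readers.file(file_root="data/images", random_shuffle=True)
        images = fn.decoders.image(jpegs, device="mixed")       # decode on the GPU
        images = fn.resize(images, resize_x=224, resize_y=224)  # resize the decoded images
        return images, labels

    pipe = example_pipeline()    # instantiates a Pipeline object
    pipe.build()
    images, labels = pipe.run()  # batches are materialized only when the pipeline runs

The fn calls only build the graph; actual batches are produced by run(), which matches the note added to docs/supported_ops.rst that operation functions do not process data directly.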