From e3f8c8a4cc3ad8b9fff291ae8f802be0206d2573 Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Fri, 28 Feb 2020 13:14:37 -0800 Subject: [PATCH 1/8] Initial reorg - update TOC, names, and order --- index.rst | 262 ++++++++++-------------------------------------------- 1 file changed, 47 insertions(+), 215 deletions(-) diff --git a/index.rst b/index.rst index 0867c1132d..0b772002f8 100644 --- a/index.rst +++ b/index.rst @@ -25,7 +25,7 @@ Some considerations: * Finally, here's a link to the `PyTorch Release Notes `_ -Getting Started +Learning PyTorch ------------------ .. customgalleryitem:: @@ -33,6 +33,16 @@ Getting Started :tooltip: Understand PyTorch’s Tensor library and neural networks at a high level :description: :doc:`/beginner/deep_learning_60min_blitz` +.. customgalleryitem:: + :tooltip: This tutorial introduces the fundamental concepts of PyTorch through self-contained examples + :figure: /_static/img/thumbnails/examples.png + :description: :doc:`/beginner/pytorch_with_examples` + +.. customgalleryitem:: + :figure: /_static/img/torch.nn.png + :tooltip: Use torch.nn to create and train a neural network + :description: :doc:`beginner/nn_tutorial` + .. customgalleryitem:: :figure: /_static/img/thumbnails/landmarked_face2.png :tooltip: Learn how to load and preprocess/augment data from a non trivial dataset @@ -48,7 +58,7 @@ Getting Started
-Image +Image/Video ---------------------- .. customgalleryitem:: @@ -81,17 +91,16 @@ Image :figure: /_static/img/dcgan_generator.png :description: :doc:`beginner/dcgan_faces_tutorial` -.. raw:: html - -
- -Named Tensor (experimental) ----------------------- +.. customgalleryitem:: + :tooltip: (experimental) Static Quantization with Eager Mode in PyTorch + :figure: /_static/img/qat.png + :description: :doc:`advanced/static_quantization_tutorial` .. customgalleryitem:: - :figure: /_static/img/named_tensor.png - :tooltip: Named Tensor - :description: :doc:`intermediate/named_tensor_tutorial` + :tooltip: Perform quantized transfer learning with feature extractor + :description: :doc:`/intermediate/quantized_transfer_learning_tutorial` + :figure: /_static/img/quantized_transfer_learning.png + .. raw:: html @@ -113,6 +122,11 @@ Audio Text ---------------------- +.. customgalleryitem:: + :tooltip: Transformer Tutorial + :figure: /_static/img/transformer_architecture.jpg + :description: :doc:`/beginner/transformer_tutorial` + .. customgalleryitem:: :figure: /_static/img/rnnclass.png :tooltip: Build and train a basic character-level RNN to classify words @@ -136,172 +150,38 @@ Text :figure: /_static/img/thumbnails/german_to_english_translation.png :description: :doc:`/beginner/torchtext_translation_tutorial` -.. customgalleryitem:: - :tooltip: Transformer Tutorial - :figure: /_static/img/transformer_architecture.jpg - :description: :doc:`/beginner/transformer_tutorial` - -.. raw:: html - -
- - -Reinforcement Learning ----------------------- - -.. customgalleryitem:: - :tooltip: Use PyTorch to train a Deep Q Learning (DQN) agent - :figure: /_static/img/cartpole.gif - :description: :doc:`intermediate/reinforcement_q_learning` - -.. raw:: html - -
- -Deploying PyTorch Models in Production --------------------------------------- - -.. customgalleryitem:: - :tooltip: Deploying PyTorch and Building a REST API using Flask - :description: :doc:`/intermediate/flask_rest_api_tutorial` - :figure: _static/img/flask.png - -.. customgalleryitem:: - :tooltip: Introduction to TorchScript - :description: :doc:`beginner/Intro_to_TorchScript_tutorial` - :figure: _static/img/torchscript.png - -.. customgalleryitem:: - :tooltip: Loading a PyTorch model in C++ - :description: :doc:`advanced/cpp_export` - :figure: _static/img/torchscript_to_cpp.png - -.. customgalleryitem:: - :figure: /_static/img/cat.jpg - :tooltip: Exporting a Model from PyTorch to ONNX and Running it using ONNXRuntime - :description: :doc:`advanced/super_resolution_with_onnxruntime` - -.. raw:: html - -
- -Parallel and Distributed Training ---------------------------------- - -.. customgalleryitem:: - :tooltip: Model parallel training on multiple GPUs - :description: :doc:`/intermediate/model_parallel_tutorial` - :figure: _static/img/distributed/DistPyTorch.jpg - -.. customgalleryitem:: - :tooltip: Getting started with DistributedDataParallel - :description: :doc:`/intermediate/ddp_tutorial` - :figure: _static/img/distributed/DistPyTorch.jpg - -.. customgalleryitem:: - :tooltip: Parallelize computations across processes and clusters of machines - :description: :doc:`/intermediate/dist_tuto` - :figure: _static/img/distributed/DistPyTorch.jpg - -.. customgalleryitem:: - :tooltip: Getting Started with Distributed RPC Framework - :description: :doc:`/intermediate/rpc_tutorial` - :figure: _static/img/distributed/DistPyTorch.jpg - -.. customgalleryitem:: - :tooltip: PyTorch distributed trainer with Amazon AWS - :description: :doc:`/beginner/aws_distributed_training_tutorial` - :figure: _static/img/distributed/DistPyTorch.jpg - -.. raw:: html - -
- -Extending PyTorch ----------------------- - -.. customgalleryitem:: - :tooltip: Implement custom operators in C++ or CUDA for TorchScript - :description: :doc:`/advanced/torch_script_custom_ops` - :figure: _static/img/cpp_logo.png - -.. customgalleryitem:: - :tooltip: Implement custom classes in C++ for TorchScript - :description: :doc:`/advanced/torch_script_custom_classes` - :figure: _static/img/cpp_logo.png - -.. customgalleryitem:: - :tooltip: Create extensions using numpy and scipy - :figure: /_static/img/scipynumpy.png - :description: :doc:`advanced/numpy_extensions_tutorial` - -.. customgalleryitem:: - :tooltip: Implement custom extensions in C++ or CUDA for eager PyTorch - :description: :doc:`/advanced/cpp_extension` - :figure: _static/img/cpp_logo.png - -.. raw:: html - -
- -Model Optimization ---------------------------- - .. customgalleryitem:: :tooltip: Perform dynamic quantization on a pre-trained PyTorch model :description: :doc:`/advanced/dynamic_quantization_tutorial` :figure: _static/img/quant_asym.png -.. customgalleryitem:: - :tooltip: (experimental) Static Quantization with Eager Mode in PyTorch - :figure: /_static/img/qat.png - :description: :doc:`advanced/static_quantization_tutorial` - -.. customgalleryitem:: - :tooltip: Perform quantized transfer learning with feature extractor - :description: :doc:`/intermediate/quantized_transfer_learning_tutorial` - :figure: /_static/img/quantized_transfer_learning.png - .. customgalleryitem:: :tooltip: Convert a well-known state-of-the-art model like BERT into dynamic quantized model :description: :doc:`/intermediate/dynamic_quantization_bert_tutorial` :figure: /_static/img/bert.png -.. customgalleryitem:: - :tooltip: Use pruning to sparsify your neural networks - :description: :doc:`/intermediate/pruning_tutorial` - :figure: _static/img/pruning.png - - .. raw:: html
- -PyTorch in Other Languages --------------------------- +Additional APIs +---------------------- .. customgalleryitem:: :tooltip: Using the PyTorch C++ Frontend :figure: /_static/img/cpp-pytorch.png :description: :doc:`advanced/cpp_frontend` -.. raw:: html - -
- -PyTorch Fundamentals In-Depth ------------------------------ - .. customgalleryitem:: - :tooltip: This tutorial introduces the fundamental concepts of PyTorch through self-contained examples - :figure: /_static/img/thumbnails/examples.png - :description: :doc:`/beginner/pytorch_with_examples` + :figure: /_static/img/named_tensor.png + :tooltip: Named Tensor + :description: :doc:`intermediate/named_tensor_tutorial` .. customgalleryitem:: - :figure: /_static/img/torch.nn.png - :tooltip: Use torch.nn to create and train a neural network - :description: :doc:`beginner/nn_tutorial` + :tooltip: Use pruning to sparsify your neural networks + :description: :doc:`/intermediate/pruning_tutorial` + :figure: _static/img/pruning.png + .. raw:: html @@ -315,17 +195,17 @@ PyTorch Fundamentals In-Depth :maxdepth: 2 :hidden: :includehidden: - :caption: Getting Started + :caption: Learning PyTorch beginner/deep_learning_60min_blitz - beginner/data_loading_tutorial - intermediate/tensorboard_tutorial + beginner/pytorch_with_examples + beginner/nn_tutorial .. toctree:: :maxdepth: 2 :includehidden: :hidden: - :caption: Image + :caption: Image/Video intermediate/torchvision_tutorial beginner/transfer_learning_tutorial @@ -333,6 +213,8 @@ PyTorch Fundamentals In-Depth advanced/neural_style_tutorial beginner/fgsm_tutorial beginner/dcgan_faces_tutorial + advanced/static_quantization_tutorial + intermediate/quantized_transfer_learning_tutorial .. toctree:: :maxdepth: 2 @@ -348,20 +230,14 @@ PyTorch Fundamentals In-Depth :hidden: :caption: Text + beginner/transformer_tutorial intermediate/char_rnn_classification_tutorial intermediate/char_rnn_generation_tutorial intermediate/seq2seq_translation_tutorial beginner/text_sentiment_ngrams_tutorial beginner/torchtext_translation_tutorial - beginner/transformer_tutorial - -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Named Tensor (experimental) - - intermediate/named_tensor_tutorial + advanced/dynamic_quantization_tutorial + intermediate/dynamic_quantization_bert_tutorial .. toctree:: :maxdepth: 2 @@ -371,51 +247,7 @@ PyTorch Fundamentals In-Depth intermediate/reinforcement_q_learning -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Deploying PyTorch Models in Production - - intermediate/flask_rest_api_tutorial - beginner/Intro_to_TorchScript_tutorial - advanced/cpp_export - advanced/super_resolution_with_onnxruntime - -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Parallel and Distributed Training - - intermediate/model_parallel_tutorial - intermediate/ddp_tutorial - intermediate/dist_tuto - intermediate/rpc_tutorial - beginner/aws_distributed_training_tutorial -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Extending PyTorch - - advanced/torch_script_custom_ops - advanced/torch_script_custom_classes - advanced/numpy_extensions_tutorial - advanced/cpp_extension - -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Model Optimization - - advanced/dynamic_quantization_tutorial - advanced/static_quantization_tutorial - intermediate/quantized_transfer_learning_tutorial - intermediate/dynamic_quantization_bert_tutorial - intermediate/pruning_tutorial .. toctree:: :maxdepth: 2 @@ -423,13 +255,13 @@ PyTorch Fundamentals In-Depth :hidden: :caption: PyTorch in Other Languages - advanced/cpp_frontend .. 
toctree:: :maxdepth: 2 :includehidden: :hidden: - :caption: PyTorch Fundamentals In-Depth + :caption: Additional APIs - beginner/pytorch_with_examples - beginner/nn_tutorial + advanced/cpp_frontend + intermediate/named_tensor_tutorial + intermediate/pruning_tutorial From e45a4c03912144ff48c984c11935a63402467302 Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Fri, 28 Feb 2020 13:24:20 -0800 Subject: [PATCH 2/8] Updates to Image/Video section --- index.rst | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/index.rst b/index.rst index 0b772002f8..f80e9e059f 100644 --- a/index.rst +++ b/index.rst @@ -71,16 +71,6 @@ Image/Video :tooltip: In transfer learning, a model created from one task is used in another :description: :doc:`beginner/transfer_learning_tutorial` -.. customgalleryitem:: - :figure: /_static/img/stn/Five.gif - :tooltip: Learn how to augment your network using a visual attention mechanism called spatial transformer networks - :description: :doc:`intermediate/spatial_transformer_tutorial` - -.. customgalleryitem:: - :figure: /_static/img/neural-style/sphx_glr_neural_style_tutorial_004.png - :tooltip: How to implement the Neural-Style algorithm developed by Gatys, Ecker, and Bethge - :description: :doc:`advanced/neural_style_tutorial` - .. customgalleryitem:: :figure: /_static/img/panda.png :tooltip: Raise your awareness to the security vulnerabilities of ML models, and get insight into the hot topic of adversarial machine learning @@ -209,8 +199,6 @@ Additional APIs intermediate/torchvision_tutorial beginner/transfer_learning_tutorial - intermediate/spatial_transformer_tutorial - advanced/neural_style_tutorial beginner/fgsm_tutorial beginner/dcgan_faces_tutorial advanced/static_quantization_tutorial From 133cf60fa4c0ddc672a0cc16719e211dd847a2ac Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Fri, 28 Feb 2020 14:24:17 -0800 Subject: [PATCH 3/8] Remove Optional: Data Parallelism tutorial from 60 min blitz --- beginner_source/deep_learning_60min_blitz.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/beginner_source/deep_learning_60min_blitz.rst b/beginner_source/deep_learning_60min_blitz.rst index 50a04faca9..d07d34c007 100644 --- a/beginner_source/deep_learning_60min_blitz.rst +++ b/beginner_source/deep_learning_60min_blitz.rst @@ -30,7 +30,6 @@ Goal of this tutorial: /beginner/blitz/autograd_tutorial /beginner/blitz/neural_networks_tutorial /beginner/blitz/cifar10_tutorial - /beginner/blitz/data_parallel_tutorial .. galleryitem:: /beginner/blitz/tensor_tutorial.py :figure: /_static/img/tensor_illustration_flat.png @@ -44,9 +43,6 @@ Goal of this tutorial: .. galleryitem:: /beginner/blitz/cifar10_tutorial.py :figure: /_static/img/cifar10.png -.. galleryitem:: /beginner/blitz/data_parallel_tutorial.py - :figure: /_static/img/data_parallel.png - .. raw:: html
From 2dd100d967633c8bd4bf90cbb6c7dc1308aabf62 Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Tue, 10 Mar 2020 12:23:18 -0700 Subject: [PATCH 4/8] Add Reinforcement Learning section back in --- index.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/index.rst b/index.rst index f80e9e059f..bf3fad5773 100644 --- a/index.rst +++ b/index.rst @@ -150,6 +150,18 @@ Text :description: :doc:`/intermediate/dynamic_quantization_bert_tutorial` :figure: /_static/img/bert.png +.. raw:: html + +
+ +Reinforcement Learning +---------------------- + +.. customgalleryitem:: + :tooltip: Use PyTorch to train a Deep Q Learning (DQN) agent + :figure: /_static/img/cartpole.gif + :description: :doc:`intermediate/reinforcement_q_learning` + .. raw:: html
From a2b5261401e792131bd342013926451a912aa5ab Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Tue, 10 Mar 2020 23:08:50 -0700 Subject: [PATCH 5/8] Add recipes directory --- .jenkins/build.sh | 11 + conf.py | 4 +- index.rst | 31 +- recipes_source/README.txt | 7 + recipes_source/recipes/README.txt | 24 ++ recipes_source/recipes/autograd_tutorial.py | 198 ++++++++++ recipes_source/recipes/cifar10_tutorial.py | 358 ++++++++++++++++++ .../recipes/data_parallel_tutorial.py | 255 +++++++++++++ .../recipes/neural_networks_tutorial.py | 261 +++++++++++++ recipes_source/recipes/tensor_tutorial.py | 195 ++++++++++ recipes_source/recipes_main.rst | 30 ++ src/pytorch-sphinx-theme | 2 +- 12 files changed, 1362 insertions(+), 14 deletions(-) create mode 100644 recipes_source/README.txt create mode 100644 recipes_source/recipes/README.txt create mode 100644 recipes_source/recipes/autograd_tutorial.py create mode 100644 recipes_source/recipes/cifar10_tutorial.py create mode 100644 recipes_source/recipes/data_parallel_tutorial.py create mode 100644 recipes_source/recipes/neural_networks_tutorial.py create mode 100644 recipes_source/recipes/tensor_tutorial.py create mode 100644 recipes_source/recipes_main.rst diff --git a/.jenkins/build.sh b/.jenkins/build.sh index 67a42cea92..dbb185d8fb 100755 --- a/.jenkins/build.sh +++ b/.jenkins/build.sh @@ -76,6 +76,17 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then FILES_TO_RUN+=($(basename $filename .py)) fi count=$((count+1)) + done + done + for filename in $(find recipes_source/ -name '*.py' -not -path '*/data/*'); do + if [ $(($count % $NUM_WORKERS)) != $WORKER_ID ]; then + echo "Removing runnable code from "$filename + python $DIR/remove_runnable_code.py $filename $filename + else + echo "Keeping "$filename + FILES_TO_RUN+=($(basename $filename .py)) + fi + count=$((count+1)) done echo "FILES_TO_RUN: " ${FILES_TO_RUN[@]} diff --git a/conf.py b/conf.py index 06ba0f2c7f..a041d96748 100644 --- a/conf.py +++ b/conf.py @@ -61,8 +61,8 @@ sphinx_gallery_conf = { 'examples_dirs': ['beginner_source', 'intermediate_source', - 'advanced_source'], - 'gallery_dirs': ['beginner', 'intermediate', 'advanced'], + 'advanced_source', 'recipes_source'], + 'gallery_dirs': ['beginner', 'intermediate', 'advanced','recipes'], 'filename_pattern': os.environ.get('GALLERY_PATTERN', r'tutorial.py'), 'backreferences_dir': False } diff --git a/index.rst b/index.rst index f80e9e059f..a7b564acde 100644 --- a/index.rst +++ b/index.rst @@ -43,11 +43,6 @@ Learning PyTorch :tooltip: Use torch.nn to create and train a neural network :description: :doc:`beginner/nn_tutorial` -.. customgalleryitem:: - :figure: /_static/img/thumbnails/landmarked_face2.png - :tooltip: Learn how to load and preprocess/augment data from a non trivial dataset - :description: :doc:`/beginner/data_loading_tutorial` - .. customgalleryitem:: :figure: /_static/img/thumbnails/pytorch_tensorboard.png :tooltip: Learn to use TensorBoard to visualize data and model training @@ -154,6 +149,18 @@ Text
+Reinforcement Learning +---------------------- + +.. customgalleryitem:: + :tooltip: Use PyTorch to train a Deep Q Learning (DQN) agent + :figure: /_static/img/cartpole.gif + :description: :doc:`intermediate/reinforcement_q_learning` + +.. raw:: html + +
+ Additional APIs ---------------------- @@ -190,6 +197,7 @@ Additional APIs beginner/deep_learning_60min_blitz beginner/pytorch_with_examples beginner/nn_tutorial + intermediate/tensorboard_tutorial .. toctree:: :maxdepth: 2 @@ -236,20 +244,21 @@ Additional APIs intermediate/reinforcement_q_learning - .. toctree:: :maxdepth: 2 :includehidden: :hidden: - :caption: PyTorch in Other Languages + :caption: Additional APIs + + advanced/cpp_frontend + intermediate/named_tensor_tutorial + intermediate/pruning_tutorial .. toctree:: :maxdepth: 2 :includehidden: :hidden: - :caption: Additional APIs + :caption: Recipes - advanced/cpp_frontend - intermediate/named_tensor_tutorial - intermediate/pruning_tutorial + recipes/recipes_main diff --git a/recipes_source/README.txt b/recipes_source/README.txt new file mode 100644 index 0000000000..5d46a28aad --- /dev/null +++ b/recipes_source/README.txt @@ -0,0 +1,7 @@ +Recipes +------------------ + +1. recipes/* and recipes_main.rst + PyTorch Recipes + https://pytorch.org/tutorials/recipes/recipes_main.html + diff --git a/recipes_source/recipes/README.txt b/recipes_source/recipes/README.txt new file mode 100644 index 0000000000..3383e324ce --- /dev/null +++ b/recipes_source/recipes/README.txt @@ -0,0 +1,24 @@ +Deep Learning with PyTorch: A 60 Minute Blitz +--------------------------------------------- + +1. tensor_tutorial.py + What is PyTorch? + https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html + +2. autograd_tutorial.py + Autograd: Automatic Differentiation + https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html + +3. neural_networks_tutorial.py + Neural Networks + https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html# + +4. autograd_tutorial.py + Automatic Differentiation + https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html + +5. cifar10_tutorial.py + Training a Classifier + https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html + + diff --git a/recipes_source/recipes/autograd_tutorial.py b/recipes_source/recipes/autograd_tutorial.py new file mode 100644 index 0000000000..98e70a251d --- /dev/null +++ b/recipes_source/recipes/autograd_tutorial.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +""" +Autograd: Automatic Differentiation +=================================== + +Central to all neural networks in PyTorch is the ``autograd`` package. +Let’s first briefly visit this, and we will then go to training our +first neural network. + + +The ``autograd`` package provides automatic differentiation for all operations +on Tensors. It is a define-by-run framework, which means that your backprop is +defined by how your code is run, and that every single iteration can be +different. + +Let us see this in more simple terms with some examples. + +Tensor +-------- + +``torch.Tensor`` is the central class of the package. If you set its attribute +``.requires_grad`` as ``True``, it starts to track all operations on it. When +you finish your computation you can call ``.backward()`` and have all the +gradients computed automatically. The gradient for this tensor will be +accumulated into ``.grad`` attribute. + +To stop a tensor from tracking history, you can call ``.detach()`` to detach +it from the computation history, and to prevent future computation from being +tracked. + +To prevent tracking history (and using memory), you can also wrap the code block +in ``with torch.no_grad():``. 
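+For example, a minimal sketch of both options (names here are illustrative)::
+
+    x = torch.ones(2, 2, requires_grad=True)
+    y = x.detach()           # same values, detached from the history
+    with torch.no_grad():
+        z = x * 2            # computed without tracking history
+    print(y.requires_grad)   # False
+    print(z.requires_grad)   # False
+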
This can be particularly helpful when evaluating a +model because the model may have trainable parameters with +``requires_grad=True``, but for which we don't need the gradients. + +There’s one more class which is very important for autograd +implementation - a ``Function``. + +``Tensor`` and ``Function`` are interconnected and build up an acyclic +graph, that encodes a complete history of computation. Each tensor has +a ``.grad_fn`` attribute that references a ``Function`` that has created +the ``Tensor`` (except for Tensors created by the user - their +``grad_fn is None``). + +If you want to compute the derivatives, you can call ``.backward()`` on +a ``Tensor``. If ``Tensor`` is a scalar (i.e. it holds a one element +data), you don’t need to specify any arguments to ``backward()``, +however if it has more elements, you need to specify a ``gradient`` +argument that is a tensor of matching shape. +""" + +import torch + +############################################################### +# Create a tensor and set ``requires_grad=True`` to track computation with it +x = torch.ones(2, 2, requires_grad=True) +print(x) + +############################################################### +# Do a tensor operation: +y = x + 2 +print(y) + +############################################################### +# ``y`` was created as a result of an operation, so it has a ``grad_fn``. +print(y.grad_fn) + +############################################################### +# Do more operations on ``y`` +z = y * y * 3 +out = z.mean() + +print(z, out) + +################################################################ +# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad`` +# flag in-place. The input flag defaults to ``False`` if not given. +a = torch.randn(2, 2) +a = ((a * 3) / (a - 1)) +print(a.requires_grad) +a.requires_grad_(True) +print(a.requires_grad) +b = (a * a).sum() +print(b.grad_fn) + +############################################################### +# Gradients +# --------- +# Let's backprop now. +# Because ``out`` contains a single scalar, ``out.backward()`` is +# equivalent to ``out.backward(torch.tensor(1.))``. + +out.backward() + +############################################################### +# Print gradients d(out)/dx +# + +print(x.grad) + +############################################################### +# You should have got a matrix of ``4.5``. Let’s call the ``out`` +# *Tensor* “:math:`o`”. +# We have that :math:`o = \frac{1}{4}\sum_i z_i`, +# :math:`z_i = 3(x_i+2)^2` and :math:`z_i\bigr\rvert_{x_i=1} = 27`. +# Therefore, +# :math:`\frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)`, hence +# :math:`\frac{\partial o}{\partial x_i}\bigr\rvert_{x_i=1} = \frac{9}{2} = 4.5`. + +############################################################### +# Mathematically, if you have a vector valued function :math:`\vec{y}=f(\vec{x})`, +# then the gradient of :math:`\vec{y}` with respect to :math:`\vec{x}` +# is a Jacobian matrix: +# +# .. math:: +# J=\left(\begin{array}{ccc} +# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\ +# \vdots & \ddots & \vdots\\ +# \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} +# \end{array}\right) +# +# Generally speaking, ``torch.autograd`` is an engine for computing +# vector-Jacobian product. That is, given any vector +# :math:`v=\left(\begin{array}{cccc} v_{1} & v_{2} & \cdots & v_{m}\end{array}\right)^{T}`, +# compute the product :math:`v^{T}\cdot J`. 
If :math:`v` happens to be +# the gradient of a scalar function :math:`l=g\left(\vec{y}\right)`, +# that is, +# :math:`v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}`, +# then by the chain rule, the vector-Jacobian product would be the +# gradient of :math:`l` with respect to :math:`\vec{x}`: +# +# .. math:: +# J^{T}\cdot v=\left(\begin{array}{ccc} +# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\ +# \vdots & \ddots & \vdots\\ +# \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} +# \end{array}\right)\left(\begin{array}{c} +# \frac{\partial l}{\partial y_{1}}\\ +# \vdots\\ +# \frac{\partial l}{\partial y_{m}} +# \end{array}\right)=\left(\begin{array}{c} +# \frac{\partial l}{\partial x_{1}}\\ +# \vdots\\ +# \frac{\partial l}{\partial x_{n}} +# \end{array}\right) +# +# (Note that :math:`v^{T}\cdot J` gives a row vector which can be +# treated as a column vector by taking :math:`J^{T}\cdot v`.) +# +# This characteristic of vector-Jacobian product makes it very +# convenient to feed external gradients into a model that has +# non-scalar output. + +############################################################### +# Now let's take a look at an example of vector-Jacobian product: + +x = torch.randn(3, requires_grad=True) + +y = x * 2 +while y.data.norm() < 1000: + y = y * 2 + +print(y) + +############################################################### +# Now in this case ``y`` is no longer a scalar. ``torch.autograd`` +# could not compute the full Jacobian directly, but if we just +# want the vector-Jacobian product, simply pass the vector to +# ``backward`` as argument: +v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float) +y.backward(v) + +print(x.grad) + +############################################################### +# You can also stop autograd from tracking history on Tensors +# with ``.requires_grad=True`` either by wrapping the code block in +# ``with torch.no_grad():`` +print(x.requires_grad) +print((x ** 2).requires_grad) + +with torch.no_grad(): + print((x ** 2).requires_grad) + +############################################################### +# Or by using ``.detach()`` to get a new Tensor with the same +# content but that does not require gradients: +print(x.requires_grad) +y = x.detach() +print(y.requires_grad) +print(x.eq(y).all()) + + +############################################################### +# **Read Later:** +# +# Document about ``autograd.Function`` is at +# https://pytorch.org/docs/stable/autograd.html#function diff --git a/recipes_source/recipes/cifar10_tutorial.py b/recipes_source/recipes/cifar10_tutorial.py new file mode 100644 index 0000000000..730bf6ac98 --- /dev/null +++ b/recipes_source/recipes/cifar10_tutorial.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +""" +Training a Classifier +===================== + +This is it. You have seen how to define neural networks, compute loss and make +updates to the weights of the network. + +Now you might be thinking, + +What about data? +---------------- + +Generally, when you have to deal with image, text, audio or video data, +you can use standard python packages that load data into a numpy array. +Then you can convert this array into a ``torch.*Tensor``. 
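+A minimal sketch of that hand-off (the array shape is just illustrative)::
+
+    import numpy as np
+    import torch
+
+    arr = np.zeros((32, 32, 3))   # e.g. an image decoded into a numpy array
+    t = torch.from_numpy(arr)     # a Tensor that shares arr's memory
+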
+ +- For images, packages such as Pillow, OpenCV are useful +- For audio, packages such as scipy and librosa +- For text, either raw Python or Cython based loading, or NLTK and + SpaCy are useful + +Specifically for vision, we have created a package called +``torchvision``, that has data loaders for common datasets such as +Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz., +``torchvision.datasets`` and ``torch.utils.data.DataLoader``. + +This provides a huge convenience and avoids writing boilerplate code. + +For this tutorial, we will use the CIFAR10 dataset. +It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’, +‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of +size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size. + +.. figure:: /_static/img/cifar10.png + :alt: cifar10 + + cifar10 + + +Training an image classifier +---------------------------- + +We will do the following steps in order: + +1. Load and normalizing the CIFAR10 training and test datasets using + ``torchvision`` +2. Define a Convolutional Neural Network +3. Define a loss function +4. Train the network on the training data +5. Test the network on the test data + +1. Loading and normalizing CIFAR10 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using ``torchvision``, it’s extremely easy to load CIFAR10. +""" +import torch +import torchvision +import torchvision.transforms as transforms + +######################################################################## +# The output of torchvision datasets are PILImage images of range [0, 1]. +# We transform them to Tensors of normalized range [-1, 1]. +# .. note:: +# If running on Windows and you get a BrokenPipeError, try setting +# the num_worker of torch.utils.data.DataLoader() to 0. + +transform = transforms.Compose( + [transforms.ToTensor(), + transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + +trainset = torchvision.datasets.CIFAR10(root='./data', train=True, + download=True, transform=transform) +trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, + shuffle=True, num_workers=2) + +testset = torchvision.datasets.CIFAR10(root='./data', train=False, + download=True, transform=transform) +testloader = torch.utils.data.DataLoader(testset, batch_size=4, + shuffle=False, num_workers=2) + +classes = ('plane', 'car', 'bird', 'cat', + 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') + +######################################################################## +# Let us show some of the training images, for fun. + +import matplotlib.pyplot as plt +import numpy as np + +# functions to show an image + + +def imshow(img): + img = img / 2 + 0.5 # unnormalize + npimg = img.numpy() + plt.imshow(np.transpose(npimg, (1, 2, 0))) + plt.show() + + +# get some random training images +dataiter = iter(trainloader) +images, labels = dataiter.next() + +# show images +imshow(torchvision.utils.make_grid(images)) +# print labels +print(' '.join('%5s' % classes[labels[j]] for j in range(4))) + + +######################################################################## +# 2. Define a Convolutional Neural Network +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Copy the neural network from the Neural Networks section before and modify it to +# take 3-channel images (instead of 1-channel images as it was defined). 
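+# (Concretely, in the definition below, the first convolution is
+# ``nn.Conv2d(3, 6, 5)``: it now expects 3 input channels.)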
+ +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +net = Net() + +######################################################################## +# 3. Define a Loss function and optimizer +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Let's use a Classification Cross-Entropy loss and SGD with momentum. + +import torch.optim as optim + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + +######################################################################## +# 4. Train the network +# ^^^^^^^^^^^^^^^^^^^^ +# +# This is when things start to get interesting. +# We simply have to loop over our data iterator, and feed the inputs to the +# network and optimize. + +for epoch in range(2): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + inputs, labels = data + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print('[%d, %5d] loss: %.3f' % + (epoch + 1, i + 1, running_loss / 2000)) + running_loss = 0.0 + +print('Finished Training') + +######################################################################## +# Let's quickly save our trained model: + +PATH = './cifar_net.pth' +torch.save(net.state_dict(), PATH) + +######################################################################## +# See `here `_ +# for more details on saving PyTorch models. +# +# 5. Test the network on the test data +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# We have trained the network for 2 passes over the training dataset. +# But we need to check if the network has learnt anything at all. +# +# We will check this by predicting the class label that the neural network +# outputs, and checking it against the ground-truth. If the prediction is +# correct, we add the sample to the list of correct predictions. +# +# Okay, first step. Let us display an image from the test set to get familiar. + +dataiter = iter(testloader) +images, labels = dataiter.next() + +# print images +imshow(torchvision.utils.make_grid(images)) +print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4))) + +######################################################################## +# Next, let's load back in our saved model (note: saving and re-loading the model +# wasn't necessary here, we only did it to illustrate how to do so): + +net = Net() +net.load_state_dict(torch.load(PATH)) + +######################################################################## +# Okay, now let us see what the neural network thinks these examples above are: + +outputs = net(images) + +######################################################################## +# The outputs are energies for the 10 classes. 
+# The higher the energy for a class, the more the network +# thinks that the image is of the particular class. +# So, let's get the index of the highest energy: +_, predicted = torch.max(outputs, 1) + +print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] + for j in range(4))) + +######################################################################## +# The results seem pretty good. +# +# Let us look at how the network performs on the whole dataset. + +correct = 0 +total = 0 +with torch.no_grad(): + for data in testloader: + images, labels = data + outputs = net(images) + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + +print('Accuracy of the network on the 10000 test images: %d %%' % ( + 100 * correct / total)) + +######################################################################## +# That looks way better than chance, which is 10% accuracy (randomly picking +# a class out of 10 classes). +# Seems like the network learnt something. +# +# Hmmm, what are the classes that performed well, and the classes that did +# not perform well: + +class_correct = list(0. for i in range(10)) +class_total = list(0. for i in range(10)) +with torch.no_grad(): + for data in testloader: + images, labels = data + outputs = net(images) + _, predicted = torch.max(outputs, 1) + c = (predicted == labels).squeeze() + for i in range(4): + label = labels[i] + class_correct[label] += c[i].item() + class_total[label] += 1 + + +for i in range(10): + print('Accuracy of %5s : %2d %%' % ( + classes[i], 100 * class_correct[i] / class_total[i])) + +######################################################################## +# Okay, so what next? +# +# How do we run these neural networks on the GPU? +# +# Training on GPU +# ---------------- +# Just like how you transfer a Tensor onto the GPU, you transfer the neural +# net onto the GPU. +# +# Let's first define our device as the first visible cuda device if we have +# CUDA available: + +device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + +# Assuming that we are on a CUDA machine, this should print a CUDA device: + +print(device) + +######################################################################## +# The rest of this section assumes that ``device`` is a CUDA device. +# +# Then these methods will recursively go over all modules and convert their +# parameters and buffers to CUDA tensors: +# +# .. code:: python +# +# net.to(device) +# +# +# Remember that you will have to send the inputs and targets at every step +# to the GPU too: +# +# .. code:: python +# +# inputs, labels = data[0].to(device), data[1].to(device) +# +# Why dont I notice MASSIVE speedup compared to CPU? Because your network +# is really small. +# +# **Exercise:** Try increasing the width of your network (argument 2 of +# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` – +# they need to be the same number), see what kind of speedup you get. +# +# **Goals achieved**: +# +# - Understanding PyTorch's Tensor library and neural networks at a high level. +# - Train a small neural network to classify images +# +# Training on multiple GPUs +# ------------------------- +# If you want to see even more MASSIVE speedup using all of your GPUs, +# please check out :doc:`data_parallel_tutorial`. +# +# Where do I go next? 
+# ------------------- +# +# - :doc:`Train neural nets to play video games ` +# - `Train a state-of-the-art ResNet network on imagenet`_ +# - `Train a face generator using Generative Adversarial Networks`_ +# - `Train a word-level language model using Recurrent LSTM networks`_ +# - `More examples`_ +# - `More tutorials`_ +# - `Discuss PyTorch on the Forums`_ +# - `Chat with other users on Slack`_ +# +# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet +# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan +# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model +# .. _More examples: https://github.com/pytorch/examples +# .. _More tutorials: https://github.com/pytorch/tutorials +# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/ +# .. _Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/ + +# %%%%%%INVISIBLE_CODE_BLOCK%%%%%% +del dataiter +# %%%%%%INVISIBLE_CODE_BLOCK%%%%%% diff --git a/recipes_source/recipes/data_parallel_tutorial.py b/recipes_source/recipes/data_parallel_tutorial.py new file mode 100644 index 0000000000..eebca8ea52 --- /dev/null +++ b/recipes_source/recipes/data_parallel_tutorial.py @@ -0,0 +1,255 @@ +""" +Optional: Data Parallelism +========================== +**Authors**: `Sung Kim `_ and `Jenny Kang `_ + +In this tutorial, we will learn how to use multiple GPUs using ``DataParallel``. + +It's very easy to use GPUs with PyTorch. You can put the model on a GPU: + +.. code:: python + + device = torch.device("cuda:0") + model.to(device) + +Then, you can copy all your tensors to the GPU: + +.. code:: python + + mytensor = my_tensor.to(device) + +Please note that just calling ``my_tensor.to(device)`` returns a new copy of +``my_tensor`` on GPU instead of rewriting ``my_tensor``. You need to assign it to +a new tensor and use that tensor on the GPU. + +It's natural to execute your forward, backward propagations on multiple GPUs. +However, Pytorch will only use one GPU by default. You can easily run your +operations on multiple GPUs by making your model run parallelly using +``DataParallel``: + +.. code:: python + + model = nn.DataParallel(model) + +That's the core behind this tutorial. We will explore it in more detail below. +""" + + +###################################################################### +# Imports and parameters +# ---------------------- +# +# Import PyTorch modules and define parameters. +# + +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader + +# Parameters and DataLoaders +input_size = 5 +output_size = 2 + +batch_size = 30 +data_size = 100 + + +###################################################################### +# Device +# +device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + +###################################################################### +# Dummy DataSet +# ------------- +# +# Make a dummy (random) dataset. 
You just need to implement the +# getitem +# + +class RandomDataset(Dataset): + + def __init__(self, size, length): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + +rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size), + batch_size=batch_size, shuffle=True) + + +###################################################################### +# Simple Model +# ------------ +# +# For the demo, our model just gets an input, performs a linear operation, and +# gives an output. However, you can use ``DataParallel`` on any model (CNN, RNN, +# Capsule Net etc.) +# +# We've placed a print statement inside the model to monitor the size of input +# and output tensors. +# Please pay attention to what is printed at batch rank 0. +# + +class Model(nn.Module): + # Our model + + def __init__(self, input_size, output_size): + super(Model, self).__init__() + self.fc = nn.Linear(input_size, output_size) + + def forward(self, input): + output = self.fc(input) + print("\tIn Model: input size", input.size(), + "output size", output.size()) + + return output + + +###################################################################### +# Create Model and DataParallel +# ----------------------------- +# +# This is the core part of the tutorial. First, we need to make a model instance +# and check if we have multiple GPUs. If we have multiple GPUs, we can wrap +# our model using ``nn.DataParallel``. Then we can put our model on GPUs by +# ``model.to(device)`` +# + +model = Model(input_size, output_size) +if torch.cuda.device_count() > 1: + print("Let's use", torch.cuda.device_count(), "GPUs!") + # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs + model = nn.DataParallel(model) + +model.to(device) + + +###################################################################### +# Run the Model +# ------------- +# +# Now we can see the sizes of input and output tensors. +# + +for data in rand_loader: + input = data.to(device) + output = model(input) + print("Outside: input size", input.size(), + "output_size", output.size()) + + +###################################################################### +# Results +# ------- +# +# If you have no GPU or one GPU, when we batch 30 inputs and 30 outputs, the model gets 30 and outputs 30 as +# expected. But if you have multiple GPUs, then you can get results like this. +# +# 2 GPUs +# ~~~~~~ +# +# If you have 2, you will see: +# +# .. code:: bash +# +# # on 2 GPUs +# Let's use 2 GPUs! +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([5, 5]) output size torch.Size([5, 2]) +# In Model: input size torch.Size([5, 5]) output size torch.Size([5, 2]) +# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2]) +# +# 3 GPUs +# ~~~~~~ +# +# If you have 3 GPUs, you will see: +# +# .. 
code:: bash +# +# Let's use 3 GPUs! +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2]) +# +# 8 GPUs +# ~~~~~~~~~~~~~~ +# +# If you have 8, you will see: +# +# .. code:: bash +# +# Let's use 8 GPUs! +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# In Model: input size torch.Size([2, 5]) output 
size torch.Size([2, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) +# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2]) +# + + +###################################################################### +# Summary +# ------- +# +# DataParallel splits your data automatically and sends job orders to multiple +# models on several GPUs. After each model finishes their job, DataParallel +# collects and merges the results before returning it to you. +# +# For more information, please check out +# https://pytorch.org/tutorials/beginner/former\_torchies/parallelism\_tutorial.html. +# diff --git a/recipes_source/recipes/neural_networks_tutorial.py b/recipes_source/recipes/neural_networks_tutorial.py new file mode 100644 index 0000000000..144dd3d144 --- /dev/null +++ b/recipes_source/recipes/neural_networks_tutorial.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +""" +Neural Networks +=============== + +Neural networks can be constructed using the ``torch.nn`` package. + +Now that you had a glimpse of ``autograd``, ``nn`` depends on +``autograd`` to define models and differentiate them. +An ``nn.Module`` contains layers, and a method ``forward(input)``\ that +returns the ``output``. + +For example, look at this network that classifies digit images: + +.. figure:: /_static/img/mnist.png + :alt: convnet + + convnet + +It is a simple feed-forward network. It takes the input, feeds it +through several layers one after the other, and then finally gives the +output. + +A typical training procedure for a neural network is as follows: + +- Define the neural network that has some learnable parameters (or + weights) +- Iterate over a dataset of inputs +- Process input through the network +- Compute the loss (how far is the output from being correct) +- Propagate gradients back into the network’s parameters +- Update the weights of the network, typically using a simple update rule: + ``weight = weight - learning_rate * gradient`` + +Define the network +------------------ + +Let’s define this network: +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + # 1 input image channel, 6 output channels, 3x3 square convolution + # kernel + self.conv1 = nn.Conv2d(1, 6, 3) + self.conv2 = nn.Conv2d(6, 16, 3) + # an affine operation: y = Wx + b + self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + # Max pooling over a (2, 2) window + x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) + # If the size is a square you can only specify a single number + x = F.max_pool2d(F.relu(self.conv2(x)), 2) + x = x.view(-1, self.num_flat_features(x)) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + def num_flat_features(self, x): + size = x.size()[1:] # all dimensions except the batch dimension + num_features = 1 + for s in size: + num_features *= s + return num_features + + +net = Net() +print(net) + +######################################################################## +# You just have to define the ``forward`` function, and the ``backward`` +# function (where gradients are computed) is automatically defined for you +# using ``autograd``. +# You can use any of the Tensor operations in the ``forward`` function. 
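+#
+# Because the graph is defined by how your code runs, ``forward`` may even use
+# ordinary Python control flow. A hypothetical sketch::
+#
+#     def forward(self, x):
+#         while x.norm() > 1:   # data-dependent control flow
+#             x = x / 2
+#         return x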
+# +# The learnable parameters of a model are returned by ``net.parameters()`` + +params = list(net.parameters()) +print(len(params)) +print(params[0].size()) # conv1's .weight + +######################################################################## +# Let's try a random 32x32 input. +# Note: expected input size of this net (LeNet) is 32x32. To use this net on +# the MNIST dataset, please resize the images from the dataset to 32x32. + +input = torch.randn(1, 1, 32, 32) +out = net(input) +print(out) + +######################################################################## +# Zero the gradient buffers of all parameters and backprops with random +# gradients: +net.zero_grad() +out.backward(torch.randn(1, 10)) + +######################################################################## +# .. note:: +# +# ``torch.nn`` only supports mini-batches. The entire ``torch.nn`` +# package only supports inputs that are a mini-batch of samples, and not +# a single sample. +# +# For example, ``nn.Conv2d`` will take in a 4D Tensor of +# ``nSamples x nChannels x Height x Width``. +# +# If you have a single sample, just use ``input.unsqueeze(0)`` to add +# a fake batch dimension. +# +# Before proceeding further, let's recap all the classes you’ve seen so far. +# +# **Recap:** +# - ``torch.Tensor`` - A *multi-dimensional array* with support for autograd +# operations like ``backward()``. Also *holds the gradient* w.r.t. the +# tensor. +# - ``nn.Module`` - Neural network module. *Convenient way of +# encapsulating parameters*, with helpers for moving them to GPU, +# exporting, loading, etc. +# - ``nn.Parameter`` - A kind of Tensor, that is *automatically +# registered as a parameter when assigned as an attribute to a* +# ``Module``. +# - ``autograd.Function`` - Implements *forward and backward definitions +# of an autograd operation*. Every ``Tensor`` operation creates at +# least a single ``Function`` node that connects to functions that +# created a ``Tensor`` and *encodes its history*. +# +# **At this point, we covered:** +# - Defining a neural network +# - Processing inputs and calling backward +# +# **Still Left:** +# - Computing the loss +# - Updating the weights of the network +# +# Loss Function +# ------------- +# A loss function takes the (output, target) pair of inputs, and computes a +# value that estimates how far away the output is from the target. +# +# There are several different +# `loss functions `_ under the +# nn package . +# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error +# between the input and the target. +# +# For example: + +output = net(input) +target = torch.randn(10) # a dummy target, for example +target = target.view(1, -1) # make it the same shape as output +criterion = nn.MSELoss() + +loss = criterion(output, target) +print(loss) + +######################################################################## +# Now, if you follow ``loss`` in the backward direction, using its +# ``.grad_fn`` attribute, you will see a graph of computations that looks +# like this: +# +# :: +# +# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d +# -> view -> linear -> relu -> linear -> relu -> linear +# -> MSELoss +# -> loss +# +# So, when we call ``loss.backward()``, the whole graph is differentiated +# w.r.t. the loss, and all Tensors in the graph that has ``requires_grad=True`` +# will have their ``.grad`` Tensor accumulated with the gradient. 
+# +# For illustration, let us follow a few steps backward: + +print(loss.grad_fn) # MSELoss +print(loss.grad_fn.next_functions[0][0]) # Linear +print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU + +######################################################################## +# Backprop +# -------- +# To backpropagate the error all we have to do is to ``loss.backward()``. +# You need to clear the existing gradients though, else gradients will be +# accumulated to existing gradients. +# +# +# Now we shall call ``loss.backward()``, and have a look at conv1's bias +# gradients before and after the backward. + + +net.zero_grad() # zeroes the gradient buffers of all parameters + +print('conv1.bias.grad before backward') +print(net.conv1.bias.grad) + +loss.backward() + +print('conv1.bias.grad after backward') +print(net.conv1.bias.grad) + +######################################################################## +# Now, we have seen how to use loss functions. +# +# **Read Later:** +# +# The neural network package contains various modules and loss functions +# that form the building blocks of deep neural networks. A full list with +# documentation is `here `_. +# +# **The only thing left to learn is:** +# +# - Updating the weights of the network +# +# Update the weights +# ------------------ +# The simplest update rule used in practice is the Stochastic Gradient +# Descent (SGD): +# +# ``weight = weight - learning_rate * gradient`` +# +# We can implement this using simple Python code: +# +# .. code:: python +# +# learning_rate = 0.01 +# for f in net.parameters(): +# f.data.sub_(f.grad.data * learning_rate) +# +# However, as you use neural networks, you want to use various different +# update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc. +# To enable this, we built a small package: ``torch.optim`` that +# implements all these methods. Using it is very simple: + +import torch.optim as optim + +# create your optimizer +optimizer = optim.SGD(net.parameters(), lr=0.01) + +# in your training loop: +optimizer.zero_grad() # zero the gradient buffers +output = net(input) +loss = criterion(output, target) +loss.backward() +optimizer.step() # Does the update + + +############################################################### +# .. Note:: +# +# Observe how gradient buffers had to be manually set to zero using +# ``optimizer.zero_grad()``. This is because gradients are accumulated +# as explained in the `Backprop`_ section. diff --git a/recipes_source/recipes/tensor_tutorial.py b/recipes_source/recipes/tensor_tutorial.py new file mode 100644 index 0000000000..7b339ee225 --- /dev/null +++ b/recipes_source/recipes/tensor_tutorial.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +""" +What is PyTorch? +================ + +It’s a Python-based scientific computing package targeted at two sets of +audiences: + +- A replacement for NumPy to use the power of GPUs +- a deep learning research platform that provides maximum flexibility + and speed + +Getting Started +--------------- + +Tensors +^^^^^^^ + +Tensors are similar to NumPy’s ndarrays, with the addition being that +Tensors can also be used on a GPU to accelerate computing. +""" + +from __future__ import print_function +import torch + +############################################################### +# .. note:: +# An uninitialized matrix is declared, +# but does not contain definite known +# values before it is used. 
When an +# uninitialized matrix is created, +# whatever values were in the allocated +# memory at the time will appear as the initial values. + +############################################################### +# Construct a 5x3 matrix, uninitialized: + +x = torch.empty(5, 3) +print(x) + +############################################################### +# Construct a randomly initialized matrix: + +x = torch.rand(5, 3) +print(x) + +############################################################### +# Construct a matrix filled zeros and of dtype long: + +x = torch.zeros(5, 3, dtype=torch.long) +print(x) + +############################################################### +# Construct a tensor directly from data: + +x = torch.tensor([5.5, 3]) +print(x) + +############################################################### +# or create a tensor based on an existing tensor. These methods +# will reuse properties of the input tensor, e.g. dtype, unless +# new values are provided by user + +x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes +print(x) + +x = torch.randn_like(x, dtype=torch.float) # override dtype! +print(x) # result has the same size + +############################################################### +# Get its size: + +print(x.size()) + +############################################################### +# .. note:: +# ``torch.Size`` is in fact a tuple, so it supports all tuple operations. +# +# Operations +# ^^^^^^^^^^ +# There are multiple syntaxes for operations. In the following +# example, we will take a look at the addition operation. +# +# Addition: syntax 1 +y = torch.rand(5, 3) +print(x + y) + +############################################################### +# Addition: syntax 2 + +print(torch.add(x, y)) + +############################################################### +# Addition: providing an output tensor as argument +result = torch.empty(5, 3) +torch.add(x, y, out=result) +print(result) + +############################################################### +# Addition: in-place + +# adds x to y +y.add_(x) +print(y) + +############################################################### +# .. note:: +# Any operation that mutates a tensor in-place is post-fixed with an ``_``. +# For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``. +# +# You can use standard NumPy-like indexing with all bells and whistles! + +print(x[:, 1]) + +############################################################### +# Resizing: If you want to resize/reshape tensor, you can use ``torch.view``: +x = torch.randn(4, 4) +y = x.view(16) +z = x.view(-1, 8) # the size -1 is inferred from other dimensions +print(x.size(), y.size(), z.size()) + +############################################################### +# If you have a one element tensor, use ``.item()`` to get the value as a +# Python number +x = torch.randn(1) +print(x) +print(x.item()) + +############################################################### +# **Read later:** +# +# +# 100+ Tensor operations, including transposing, indexing, slicing, +# mathematical operations, linear algebra, random numbers, etc., +# are described +# `here `_. +# +# NumPy Bridge +# ------------ +# +# Converting a Torch Tensor to a NumPy array and vice versa is a breeze. +# +# The Torch Tensor and NumPy array will share their underlying memory +# locations (if the Torch Tensor is on CPU), and changing one will change +# the other. 
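+#
+# A related caveat, sketched here with illustrative values:
+# ``torch.from_numpy(arr)`` shares memory with ``arr``, while
+# ``torch.tensor(arr)`` makes a copy.
+#
+# .. code:: python
+#
+#    import numpy as np
+#    arr = np.zeros(3)
+#    shared = torch.from_numpy(arr)   # shares memory with arr
+#    copied = torch.tensor(arr)       # copies the data
+#    arr += 1
+#    print(shared)   # tensor([1., 1., 1.], dtype=torch.float64)
+#    print(copied)   # tensor([0., 0., 0.], dtype=torch.float64)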
+#
+# Converting a Torch Tensor to a NumPy Array
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+a = torch.ones(5)
+print(a)
+
+###############################################################
+#
+
+b = a.numpy()
+print(b)
+
+###############################################################
+# See how the numpy array changed in value.
+
+a.add_(1)
+print(a)
+print(b)
+
+###############################################################
+# Converting NumPy Array to Torch Tensor
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# See how changing the np array changed the Torch Tensor automatically
+
+import numpy as np
+a = np.ones(5)
+b = torch.from_numpy(a)
+np.add(a, 1, out=a)
+print(a)
+print(b)
+
+###############################################################
+# All the Tensors on the CPU except a CharTensor support converting to
+# NumPy and back.
+#
+# CUDA Tensors
+# ------------
+#
+# Tensors can be moved onto any device using the ``.to`` method.
+
+# let us run this cell only if CUDA is available
+# We will use ``torch.device`` objects to move tensors in and out of GPU
+if torch.cuda.is_available():
+    device = torch.device("cuda")          # a CUDA device object
+    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
+    x = x.to(device)                       # or just use strings ``.to("cuda")``
+    z = x + y
+    print(z)
+    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
diff --git a/recipes_source/recipes_main.rst b/recipes_source/recipes_main.rst
new file mode 100644
index 0000000000..48a303cb6e
--- /dev/null
+++ b/recipes_source/recipes_main.rst
@@ -0,0 +1,30 @@
+PyTorch Recipes
+---------------------------------------------
+Recipes are bite-sized, actionable examples of how to use specific PyTorch features, different from our full-length tutorials.
+
+.. toctree::
+   :hidden:
+
+   /recipes/recipes/autograd_tutorial
+   /recipes/recipes/cifar10_tutorial
+   /recipes/recipes/data_parallel_tutorial
+   /recipes/recipes/neural_networks_tutorial
+
+
+.. galleryitem:: /recipes/recipes/autograd_tutorial.py
+    :figure: /_static/img/autodiff.png
+
+.. galleryitem:: /recipes/recipes/cifar10_tutorial.py
+    :figure: /_static/img/cifar10.png
+
+.. galleryitem:: /recipes/recipes/data_parallel_tutorial.py
+    :figure: /_static/img/data_parallel.png
+
+.. galleryitem:: /recipes/recipes/neural_networks_tutorial.py
+    :figure: /_static/img/mnist.png
+
+
+
+.. raw:: html
+
+    <div style='clear:both'></div>
diff --git a/src/pytorch-sphinx-theme b/src/pytorch-sphinx-theme index 19dbba563f..135a5708ac 160000 --- a/src/pytorch-sphinx-theme +++ b/src/pytorch-sphinx-theme @@ -1 +1 @@ -Subproject commit 19dbba563ffd86c4167b6e9ac571556521c25f13 +Subproject commit 135a5708ac5378c717d4bfd5c00a85b768223a65 From d53c5381e47422da4ebf9bb12c557278d392f3b8 Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Tue, 10 Mar 2020 23:18:09 -0700 Subject: [PATCH 6/8] Update to show initial recipes --- recipes_source/recipes/README.txt | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/recipes_source/recipes/README.txt b/recipes_source/recipes/README.txt index 3383e324ce..8b34cae4f9 100644 --- a/recipes_source/recipes/README.txt +++ b/recipes_source/recipes/README.txt @@ -1,24 +1,20 @@ -Deep Learning with PyTorch: A 60 Minute Blitz +PyTorch Recipes --------------------------------------------- -1. tensor_tutorial.py - What is PyTorch? - https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html - -2. autograd_tutorial.py +1. autograd_tutorial.py Autograd: Automatic Differentiation - https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html + https://pytorch.org/tutorials/recipes/recipes/autograd_tutorial.html + +2. cifar10_tutorial.py + Training a Classifier + https://pytorch.org/tutorials/recipes/recipes/cifar10_tutorial.html 3. neural_networks_tutorial.py Neural Networks - https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html# + https://pytorch.org/tutorials/recipes/recipes/neural_networks_tutorial.html# -4. autograd_tutorial.py - Automatic Differentiation - https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html - -5. cifar10_tutorial.py - Training a Classifier - https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html +4. data_parallel_tutorial.py + Optional: Data Parallelism + https://pytorch.org/tutorials/recipes/recipes/data_parallel_tutorial.html From 9c3fe954f6b05917fd41ca4c38b4f26c36018bed Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Tue, 10 Mar 2020 23:28:57 -0700 Subject: [PATCH 7/8] Rename recipes_main.rst to recipes_index.rst --- index.rst | 4 ++-- recipes_source/{recipes_main.rst => recipes_index.rst} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename recipes_source/{recipes_main.rst => recipes_index.rst} (100%) diff --git a/index.rst b/index.rst index ba7a771179..d433894984 100644 --- a/index.rst +++ b/index.rst @@ -148,7 +148,7 @@ Text .. raw:: html
- + Reinforcement Learning ---------------------- @@ -273,4 +273,4 @@ Additional APIs :hidden: :caption: Recipes - recipes/recipes_main + recipes/recipes_index diff --git a/recipes_source/recipes_main.rst b/recipes_source/recipes_index.rst similarity index 100% rename from recipes_source/recipes_main.rst rename to recipes_source/recipes_index.rst From 7e80ed3bf2f52ad5079f35d7e895450584c24037 Mon Sep 17 00:00:00 2001 From: Jessica Lin Date: Mon, 23 Mar 2020 16:36:07 -0700 Subject: [PATCH 8/8] Add new tutorials UI with filter. Add recipes. --- README.md | 2 +- .../torch_script_custom_classes.rst | 49 +- beginner_source/blitz/tensor_tutorial.py | 162 +++++-- conf.py | 10 +- custom_directives.py | 141 +++++- index.rst | 428 +++++++++++------- intermediate_source/rpc_tutorial.rst | 13 +- recipes/index.rst | 102 +++++ recipes/recipes/autograd_tutorial.ipynb | 273 +++++++++++ recipes/recipes/autograd_tutorial.py | 198 ++++++++ recipes/recipes/autograd_tutorial.rst | 282 ++++++++++++ recipes/recipes/example_recipe.ipynb | 86 ++++ recipes/recipes/example_recipe.py | 65 +++ recipes/recipes/example_recipe.rst | 108 +++++ .../sphx_glr_autograd_tutorial_thumb.png | Bin 0 -> 26786 bytes .../thumb/sphx_glr_example_recipe_thumb.png | Bin 0 -> 26786 bytes recipes/recipes_index.rst | 20 + recipes_source/README.txt | 8 +- recipes_source/recipes/README.txt | 19 +- recipes_source/recipes/cifar10_tutorial.py | 358 --------------- recipes_source/recipes/custom_dataset.ipynb | 383 ++++++++++++++++ .../recipes/data_parallel_tutorial.py | 255 ----------- recipes_source/recipes/example_recipe.py | 65 +++ .../recipes/neural_networks_tutorial.py | 261 ----------- recipes_source/recipes/tensor_tutorial.py | 195 -------- recipes_source/recipes_index.rst | 16 +- 26 files changed, 2155 insertions(+), 1344 deletions(-) create mode 100644 recipes/index.rst create mode 100644 recipes/recipes/autograd_tutorial.ipynb create mode 100644 recipes/recipes/autograd_tutorial.py create mode 100644 recipes/recipes/autograd_tutorial.rst create mode 100644 recipes/recipes/example_recipe.ipynb create mode 100644 recipes/recipes/example_recipe.py create mode 100644 recipes/recipes/example_recipe.rst create mode 100644 recipes/recipes/images/thumb/sphx_glr_autograd_tutorial_thumb.png create mode 100644 recipes/recipes/images/thumb/sphx_glr_example_recipe_thumb.png create mode 100644 recipes/recipes_index.rst delete mode 100644 recipes_source/recipes/cifar10_tutorial.py create mode 100644 recipes_source/recipes/custom_dataset.ipynb delete mode 100644 recipes_source/recipes/data_parallel_tutorial.py create mode 100644 recipes_source/recipes/example_recipe.py delete mode 100644 recipes_source/recipes/neural_networks_tutorial.py delete mode 100644 recipes_source/recipes/tensor_tutorial.py diff --git a/README.md b/README.md index 7235e04468..c0cbd4f3cd 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ All the tutorials are now presented as sphinx style documentation at: # Contributing -We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/tutorials/plot_notebook.html#sphx-glr-tutorials-plot-notebook-py) to create the tutorials. Syntax is very simple. In essence, you write a slightly well formatted python file and it shows up as documentation page. +We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/stable/tutorials/index.html) to create the tutorials. Syntax is very simple. In essence, you write a slightly well formatted python file and it shows up as documentation page. 
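
For example, a minimal notebook styled file (hypothetical content) looks like this: the module docstring becomes the page intro, and each comment block under a line of hashes becomes a prose cell between executed code cells.

```python
# -*- coding: utf-8 -*-
"""
My Tutorial
===========

Introductory text rendered at the top of the generated page.
"""
import torch

###############################################################
# This comment block is rendered as prose; the code below it is
# executed and its output is shown inline.
x = torch.ones(2, 2)
print(x)
```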
Here's how to create a new tutorial: 1. Create a notebook styled python file. If you want it executed while inserted into documentation, save the file with suffix `tutorial` so that file name is `your_tutorial.py`. diff --git a/advanced_source/torch_script_custom_classes.rst b/advanced_source/torch_script_custom_classes.rst index 6ef54a4a8d..93a7834852 100644 --- a/advanced_source/torch_script_custom_classes.rst +++ b/advanced_source/torch_script_custom_classes.rst @@ -29,7 +29,7 @@ state in a member variable. #include template - struct MyStackClass : torch::jit::CustomClassHolder { + struct MyStackClass : torch::CustomClassHolder { std::vector stack_; MyStackClass(std::vector init) : stack_(init.begin(), init.end()) {} @@ -63,7 +63,7 @@ There are several things to note: is to ensure consistent lifetime management of the object instances between languages (C++, Python and TorchScript). - The second thing to notice is that the user-defined class must inherit from - ``torch::jit::CustomClassHolder``. This ensures that everything is set up to handle + ``torch::CustomClassHolder``. This ensures that everything is set up to handle the lifetime management system previously mentioned. Now let's take a look at how we will make this class visible to TorchScript, a process called @@ -73,24 +73,24 @@ Now let's take a look at how we will make this class visible to TorchScript, a p // Notice a few things: // - We pass the class to be registered as a template parameter to - // `torch::jit::class_`. In this instance, we've passed the + // `torch::class_`. In this instance, we've passed the // specialization of the MyStackClass class ``MyStackClass``. // In general, you cannot register a non-specialized template // class. For non-templated classes, you can just pass the // class name directly as the template parameter. - // - The single parameter to ``torch::jit::class_()`` is a + // - The single parameter to ``torch::class_()`` is a // string indicating the name of the class. This is the name // the class will appear as in both Python and TorchScript. // For example, our MyStackClass class would appear as ``torch.classes.MyStackClass``. static auto testStack = - torch::jit::class_>("MyStackClass") + torch::class_>("MyStackClass") // The following line registers the contructor of our MyStackClass // class that takes a single `std::vector` argument, // i.e. it exposes the C++ method `MyStackClass(std::vector init)`. // Currently, we do not support registering overloaded // constructors, so for now you can only `def()` one instance of - // `torch::jit::init`. - .def(torch::jit::init>()) + // `torch::init`. + .def(torch::init>()) // The next line registers a stateless (i.e. no captures) C++ lambda // function as a method. Note that a lambda function must take a // `c10::intrusive_ptr` (or some const/ref version of that) @@ -99,7 +99,7 @@ Now let's take a look at how we will make this class visible to TorchScript, a p return self->stack_.back(); }) // The following four lines expose methods of the MyStackClass - // class as-is. `torch::jit::class_` will automatically examine the + // class as-is. `torch::class_` will automatically examine the // argument and return types of the passed-in method pointers and // expose these to Python and TorchScript accordingly. 
Finally, notice // that we must take the *address* of the fully-qualified method name, @@ -307,7 +307,7 @@ Let's populate ``infer.cpp`` with the following: #include int main(int argc, const char* argv[]) { - torch::jit::script::Module module; + torch::script::Module module; try { // Deserialize the ScriptModule from a file using torch::jit::load(). module = torch::jit::load("foo.pt"); @@ -394,6 +394,31 @@ And now we can run our exciting C++ binary: Incredible! +Moving Custom Classes To/From IValues +------------------------------------- + +It's also possible that you may need to move custom classes into or out of +``IValue``s, such as when you take or return ``IValue``s from TorchScript methods +or you want to instantiate a custom class attribute in C++. For creating an +``IValue`` from a custom C++ class instance: + +- ``torch::make_custom_class()`` provides an API similar to c10::intrusive_ptr + in that it will take whatever set of arguments you provide to it, call the constructor + of T that matches that set of arguments, and wrap that instance up and return it. + However, instead of returning just a pointer to a custom class object, it returns + an ``IValue`` wrapping the object. You can then pass this ``IValue`` directly to + TorchScript. +- In the event that you already have an ``intrusive_ptr`` pointing to your class, you + can directly construct an IValue from it using the constructor ``IValue(intrusive_ptr)``. + +For converting ``IValue``s back to custom classes: + +- ``IValue::toCustomClass()`` will return an ``intrusive_ptr`` pointing to the + custom class that the ``IValue`` contains. Internally, this function is checking + that ``T`` is registered as a custom class and that the ``IValue`` does in fact contain + a custom class. You can check whether the ``IValue`` contains a custom class manually by + calling ``isCustomClass()``. + Defining Serialization/Deserialization Methods for Custom C++ Classes --------------------------------------------------------------------- @@ -422,7 +447,7 @@ an attribute, you'll get the following error: .. code-block:: shell $ python export_attr.py - RuntimeError: Cannot serialize custom bound C++ class __torch__.torch.classes.MyStackClass. Please define serialization methods via torch::jit::pickle_ for this class. (pushIValueImpl at ../torch/csrc/jit/pickler.cpp:128) + RuntimeError: Cannot serialize custom bound C++ class __torch__.torch.classes.MyStackClass. Please define serialization methods via def_pickle for this class. (pushIValueImpl at ../torch/csrc/jit/pickler.cpp:128) This is because TorchScript cannot automatically figure out what information save from your C++ class. You must specify that manually. The way to do that @@ -441,8 +466,8 @@ Here is an example of how we can update the registration code for our .. code-block:: cpp static auto testStack = - torch::jit::class_>("MyStackClass") - .def(torch::jit::init>()) + torch::class_>("MyStackClass") + .def(torch::init>()) .def("top", [](const c10::intrusive_ptr>& self) { return self->stack_.back(); }) diff --git a/beginner_source/blitz/tensor_tutorial.py b/beginner_source/blitz/tensor_tutorial.py index 7b339ee225..3ce10f3ca3 100644 --- a/beginner_source/blitz/tensor_tutorial.py +++ b/beginner_source/blitz/tensor_tutorial.py @@ -3,101 +3,147 @@ What is PyTorch? ================ -It’s a Python-based scientific computing package targeted at two sets of +It is a open source machine learning framework that accelerates the +path from research prototyping to production deployment. 
+ +PyTorch is built as a Python-based scientific computing package targeted at two sets of audiences: -- A replacement for NumPy to use the power of GPUs -- a deep learning research platform that provides maximum flexibility - and speed +- Those who are looking for a replacement for NumPy to use the power of GPUs. +- Researchers who want to build with a deep learning platform that provides maximum flexibility + and speed. Getting Started --------------- +In this section of the tutorial, we will introduce the concept of a tensor in PyTorch, and its operations. + Tensors ^^^^^^^ -Tensors are similar to NumPy’s ndarrays, with the addition being that -Tensors can also be used on a GPU to accelerate computing. +A tensor is a generic n-dimensional array. Tensors in PyTorch are similar to NumPy’s ndarrays, +with the addition being that tensors can also be used on a GPU to accelerate computing. + +To see the behavior of tensors, we will first need to import PyTorch into our program. """ from __future__ import print_function import torch -############################################################### -# .. note:: -# An uninitialized matrix is declared, -# but does not contain definite known -# values before it is used. When an -# uninitialized matrix is created, -# whatever values were in the allocated -# memory at the time will appear as the initial values. +""" +We import ``future`` here to help port our code from Python 2 to Python 3. +For more details, see the `Python-Future technical documentation `_. + +Let's take a look at how we can create tensors. +""" ############################################################### -# Construct a 5x3 matrix, uninitialized: +# First, construct a 5x3 empty matrix: x = torch.empty(5, 3) print(x) + +""" +``torch.empty`` creates an uninitialized matrix of type tensor. +When an empty tensor is declared, it does not contain definite known values +before you populate it. The values in the empty tensor are those that were in +the allocated memory at the time of initialization. +""" ############################################################### -# Construct a randomly initialized matrix: +# Now, construct a randomly initialized matrix: x = torch.rand(5, 3) print(x) +""" +``torch.rand`` creates an initialized matrix of type tensor with a random +sampling of values. +""" + ############################################################### # Construct a matrix filled zeros and of dtype long: x = torch.zeros(5, 3, dtype=torch.long) print(x) +""" +``torch.zeros`` creates an initialized matrix of type tensor with every +index having a value of zero. +""" + ############################################################### -# Construct a tensor directly from data: +# Let's construct a tensor with data that we define ourselves: x = torch.tensor([5.5, 3]) print(x) +""" +Our tensor can represent all types of data. This data can be an audio waveform, the +pixels of an image, even entities of a language. + +PyTorch has packages that support these specific data types. For additional learning, see: +- `torchvision `_ +- `torchtext `_ +- `torchaudio `_ +""" + ############################################################### -# or create a tensor based on an existing tensor. These methods -# will reuse properties of the input tensor, e.g. dtype, unless -# new values are provided by user +# You can create a tensor based on an existing tensor. These methods +# reuse the properties of the input tensor, e.g. ``dtype``, unless +# new values are provided by the user. 
+# -x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes +x = x.new_ones(5, 3, dtype=torch.double) print(x) x = torch.randn_like(x, dtype=torch.float) # override dtype! print(x) # result has the same size +""" +``tensor.new_*`` methods take in the size of the tensor and a ``dtype``, +returning a tensor filled with ones. + +In this example,``torch.randn_like`` creates a new tensor based upon the +input tensor, and overrides the ``dtype`` to be a float. The output of +this method is a tensor of the same size and different ``dtype``. +""" + ############################################################### -# Get its size: +# We can get the size of a tensor as a tuple: print(x.size()) ############################################################### # .. note:: -# ``torch.Size`` is in fact a tuple, so it supports all tuple operations. +# Since ``torch.Size`` is a tuple, it supports all tuple operations. # # Operations # ^^^^^^^^^^ -# There are multiple syntaxes for operations. In the following -# example, we will take a look at the addition operation. +# There are multiple syntaxes for operations that can be performed on tensors. +# In the following example, we will take a look at the addition operation. # -# Addition: syntax 1 +# First, let's try using the ``+`` operator. + y = torch.rand(5, 3) print(x + y) ############################################################### -# Addition: syntax 2 +# Using the ``+`` operator should have the same output as using the +# ``add()`` method. print(torch.add(x, y)) ############################################################### -# Addition: providing an output tensor as argument +# You can also provide a tensor as an argument to the ``add()`` +# method that will contain the data of the output operation. + result = torch.empty(5, 3) torch.add(x, y, out=result) print(result) ############################################################### -# Addition: in-place +# Finally, you can perform this operation in-place. # adds x to y y.add_(x) @@ -107,21 +153,29 @@ # .. note:: # Any operation that mutates a tensor in-place is post-fixed with an ``_``. # For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``. -# -# You can use standard NumPy-like indexing with all bells and whistles! + +############################################################### +# Similar to NumPy, tensors can be indexed using the standard +# Python ``x[i]`` syntax, where ``x`` is the array and ``i`` is the selection. +# +# That said, you can use NumPy-like indexing with all its bells and whistles! print(x[:, 1]) ############################################################### -# Resizing: If you want to resize/reshape tensor, you can use ``torch.view``: +# Resizing your tensors might be necessary for your data. +# If you want to resize or reshape tensor, you can use ``torch.view``: + x = torch.randn(4, 4) y = x.view(16) z = x.view(-1, 8) # the size -1 is inferred from other dimensions print(x.size(), y.size(), z.size()) ############################################################### -# If you have a one element tensor, use ``.item()`` to get the value as a -# Python number +# You can access the Python number-value of a one-element tensor using ``.item()``. +# If you have a multidimensional tensor, see the +# `tolist() `_ method. 
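+#
+# (As a quick sketch of ``tolist()``: ``torch.zeros(2, 2).tolist()`` returns
+# the nested Python list ``[[0.0, 0.0], [0.0, 0.0]]``.)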
+ x = torch.randn(1) print(x) print(x.item()) @@ -130,43 +184,55 @@ # **Read later:** # # -# 100+ Tensor operations, including transposing, indexing, slicing, -# mathematical operations, linear algebra, random numbers, etc., -# are described -# `here `_. +# This was just a sample of the 100+ Tensor operations you have +# access to in PyTorch. There are many others, including transposing, +# indexing, slicing, mathematical operations, linear algebra, +# random numbers, and more. Read and explore more about them in our +# `technical documentation `_. # # NumPy Bridge # ------------ # -# Converting a Torch Tensor to a NumPy array and vice versa is a breeze. +# As mentioned earlier, one of the benefits of using PyTorch is that it +# is built to provide a seemless transition from NumPy. +# +# For example, converting a Torch Tensor to a NumPy array (and vice versa) +# is a breeze. # # The Torch Tensor and NumPy array will share their underlying memory -# locations (if the Torch Tensor is on CPU), and changing one will change +# locations (if the Torch Tensor is on CPU). That means, changing one will change # the other. # +# Let's see this in action. +# # Converting a Torch Tensor to a NumPy Array # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# First, construct a 1-dimensional tensor populated with ones. a = torch.ones(5) print(a) ############################################################### -# +# Now, let's construct a NumPy array based off of that tensor. b = a.numpy() print(b) ############################################################### -# See how the numpy array changed in value. +# Let's see how they share their memory locations. Add ``1`` to the torch tensor. a.add_(1) print(a) print(b) +############################################################### +# Take note how the numpy array also changed in value. + ############################################################### # Converting NumPy Array to Torch Tensor # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -# See how changing the np array changed the Torch Tensor automatically +# Try the same thing for NumPy to Torch Tensor. +# See how changing the NumPy array changed the Torch Tensor automatically as well. import numpy as np a = np.ones(5) @@ -176,15 +242,17 @@ print(b) ############################################################### -# All the Tensors on the CPU except a CharTensor support converting to +# All the Tensors on the CPU (except a CharTensor) support converting to # NumPy and back. # # CUDA Tensors # ------------ # # Tensors can be moved onto any device using the ``.to`` method. +# The following code block can be run by changing the runtime in +# your notebook to "GPU" or greater. -# let us run this cell only if CUDA is available +# This cell will run only if CUDA is available # We will use ``torch.device`` objects to move tensors in and out of GPU if torch.cuda.is_available(): device = torch.device("cuda") # a CUDA device object @@ -193,3 +261,7 @@ z = x + y print(z) print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together! + +############################################################### +# Now that you have had time to experiment with Tensors in PyTorch, let's take +# a look at Automatic Differentiation. 
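+
+###############################################################
+# As a minimal preview of what autograd looks like (a sketch; the value of
+# ``x.grad`` below is what we would expect, not captured output):
+#
+# .. code:: python
+#
+#    x = torch.ones(2, 2, requires_grad=True)
+#    y = (x + 2).sum()
+#    y.backward()      # computes dy/dx
+#    print(x.grad)     # a 2x2 tensor of ones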
diff --git a/conf.py b/conf.py index a041d96748..2a5a3e2209 100644 --- a/conf.py +++ b/conf.py @@ -34,7 +34,7 @@ import torch import glob import shutil -from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective +from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective try: @@ -42,7 +42,7 @@ except ImportError: import warnings warnings.warn('unable to load "torchvision" package') -import pytorch_sphinx_theme +#import pytorch_sphinx_theme # -- General configuration ------------------------------------------------ @@ -62,7 +62,7 @@ sphinx_gallery_conf = { 'examples_dirs': ['beginner_source', 'intermediate_source', 'advanced_source', 'recipes_source'], - 'gallery_dirs': ['beginner', 'intermediate', 'advanced','recipes'], + 'gallery_dirs': ['beginner', 'intermediate', 'advanced', 'recipes'], 'filename_pattern': os.environ.get('GALLERY_PATTERN', r'tutorial.py'), 'backreferences_dir': False } @@ -162,7 +162,7 @@ html_theme = 'pytorch_sphinx_theme' -html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_path = ['../../pytorch_sphinx_theme-1'] html_logo = '_static/img/pytorch-logo-dark.svg' html_theme_options = { 'pytorch_project': 'tutorials', @@ -237,3 +237,5 @@ def setup(app): app.add_directive('includenodoc', IncludeDirective) app.add_directive('galleryitem', GalleryItemDirective) app.add_directive('customgalleryitem', CustomGalleryItemDirective) + app.add_directive('customcarditem', CustomCardItemDirective) + app.add_directive('customcalloutitem', CustomCalloutItemDirective) diff --git a/custom_directives.py b/custom_directives.py index 943b14d7e4..cfb1174409 100644 --- a/custom_directives.py +++ b/custom_directives.py @@ -1,5 +1,5 @@ from docutils.parsers.rst import Directive, directives -from docutils.statemachine import StringList +from docutils.statemachine import StringList from docutils import nodes import re import os @@ -142,7 +142,7 @@ class CustomGalleryItemDirective(Directive): """Create a sphinx gallery style thumbnail. tooltip and figure are self explanatory. Description could be a link to - a document like in below example. + a document like in below example. 
    Example usage:

@@ -206,3 +206,140 @@ def run(self):
         thumb = nodes.paragraph()
         self.state.nested_parse(thumbnail, self.content_offset, thumb)
         return [thumb]
+
+
+class CustomCardItemDirective(Directive):
+    option_spec = {'header': directives.unchanged,
+                   'image': directives.unchanged,
+                   'link': directives.unchanged,
+                   'card_description': directives.unchanged,
+                   'tags': directives.unchanged}
+
+    def run(self):
+        try:
+            if 'header' in self.options:
+                header = self.options['header']
+            else:
+                raise ValueError('header not found')
+
+            if 'image' in self.options:
+                image = '<img src="' + self.options['image'] + '">'
+            else:
+                image = '_static/img/thumbnails/default.png'
+
+            if 'link' in self.options:
+                link = self.options['link']
+            else:
+                link = ''
+
+            if 'card_description' in self.options:
+                card_description = self.options['card_description']
+            else:
+                card_description = ''
+
+            if 'tags' in self.options:
+                tags = self.options['tags']
+            else:
+                tags = ''
+
+        except FileNotFoundError as e:
+            print(e)
+            return []
+        except ValueError as e:
+            print(e)
+            raise
+
+        card_rst = CARD_TEMPLATE.format(header=header,
+                                        image=image,
+                                        link=link,
+                                        card_description=card_description,
+                                        tags=tags)
+        card_list = StringList(card_rst.split('\n'))
+        card = nodes.paragraph()
+        self.state.nested_parse(card_list, self.content_offset, card)
+        return [card]
+
+
+CARD_TEMPLATE = """
+.. raw:: html
+
+    <div class="col-md-12 tutorials-card-container" data-tags={tags}>
+
+    <div class="card tutorials-card" link={link}>
+
+    <div class="card-body">
+
+    <div class="card-title-container">
+        <h4>{header}</h4>
+    </div>
+
+    <p class="card-summary">{card_description}</p>
+
+    <p class="tags">{tags}</p>
+
+    <div class="tutorials-image">{image}</div>
+
+    </div>
+
+    </div>
+
+    </div>
+"""
+
+
+class CustomCalloutItemDirective(Directive):
+    option_spec = {'header': directives.unchanged,
+                   'description': directives.unchanged,
+                   'button_link': directives.unchanged,
+                   'button_text': directives.unchanged}
+
+    def run(self):
+        try:
+            if 'description' in self.options:
+                description = self.options['description']
+            else:
+                description = ''
+
+            if 'header' in self.options:
+                header = self.options['header']
+            else:
+                raise ValueError('header not found')
+
+            if 'button_link' in self.options:
+                button_link = self.options['button_link']
+            else:
+                button_link = ''
+
+            if 'button_text' in self.options:
+                button_text = self.options['button_text']
+            else:
+                button_text = ''
+
+        except FileNotFoundError as e:
+            print(e)
+            return []
+        except ValueError as e:
+            print(e)
+            raise
+
+        callout_rst = CALLOUT_TEMPLATE.format(description=description,
+                                              header=header,
+                                              button_link=button_link,
+                                              button_text=button_text)
+        callout_list = StringList(callout_rst.split('\n'))
+        callout = nodes.paragraph()
+        self.state.nested_parse(callout_list, self.content_offset, callout)
+        return [callout]
+
+CALLOUT_TEMPLATE = """
+.. raw:: html
+
+    <div class="col-md-6">
+        <div class="text-container">
+            <h3>{header}</h3>
+            <p class="body-paragraph">{description}</p>
+            <a class="btn with-right-arrow callout-button" href="{button_link}">{button_text}</a>
+        </div>
+    </div>
+""" diff --git a/index.rst b/index.rst index d433894984..1f0b64cd49 100644 --- a/index.rst +++ b/index.rst @@ -1,205 +1,296 @@ Welcome to PyTorch Tutorials ============================ -To learn how to use PyTorch, begin with our Getting Started Tutorials. -The :doc:`60-minute blitz ` is the most common -starting point, and provides a broad view into how to use PyTorch from the basics all the way into constructing deep neural networks. - -Some considerations: - -* We’ve added a new feature to tutorials that allows users to open the notebook associated with a tutorial in Google Colab. - Visit `this page `_ for more information. -* If you would like to do the tutorials interactively via IPython / Jupyter, - each tutorial has a download link for a Jupyter Notebook and Python source code. -* Additional high-quality examples are available, including image classification, - unsupervised learning, reinforcement learning, machine translation, and - many other applications, in `PyTorch Examples - `_. -* You can find reference documentation for the PyTorch API and layers in `PyTorch Docs - `_ or via inline help. -* If you would like the tutorials section improved, please open a github issue - `here `_ with your feedback. -* Check out our - `PyTorch Cheat Sheet `_ - for additional useful information. -* Finally, here's a link to the - `PyTorch Release Notes `_ - -Learning PyTorch ------------------- - -.. customgalleryitem:: - :figure: /_static/img/thumbnails/pytorch-logo-flat.png - :tooltip: Understand PyTorch’s Tensor library and neural networks at a high level - :description: :doc:`/beginner/deep_learning_60min_blitz` - -.. customgalleryitem:: - :tooltip: This tutorial introduces the fundamental concepts of PyTorch through self-contained examples - :figure: /_static/img/thumbnails/examples.png - :description: :doc:`/beginner/pytorch_with_examples` - -.. customgalleryitem:: - :figure: /_static/img/torch.nn.png - :tooltip: Use torch.nn to create and train a neural network - :description: :doc:`beginner/nn_tutorial` - -.. customgalleryitem:: - :figure: /_static/img/thumbnails/pytorch_tensorboard.png - :tooltip: Learn to use TensorBoard to visualize data and model training - :description: :doc:`intermediate/tensorboard_tutorial` - .. raw:: html -
- - -Image/Video ----------------------- - -.. customgalleryitem:: - :figure: /_static/img/thumbnails/tv-img.png - :tooltip: Finetuning a pre-trained Mask R-CNN model - :description: :doc:`intermediate/torchvision_tutorial` - -.. customgalleryitem:: - :figure: /_static/img/thumbnails/sphx_glr_transfer_learning_tutorial_001.png - :tooltip: In transfer learning, a model created from one task is used in another - :description: :doc:`beginner/transfer_learning_tutorial` +
+
-
-.. customgalleryitem::
-   :figure: /_static/img/panda.png
-   :tooltip: Raise your awareness to the security vulnerabilities of ML models, and get insight into the hot topic of adversarial machine learning
-   :description: :doc:`beginner/fgsm_tutorial`
+.. Add callout items below this line

-.. customgalleryitem::
-   :tooltip: Train a generative adversarial network (GAN) to generate new celebrities
-   :figure: /_static/img/dcgan_generator.png
-   :description: :doc:`beginner/dcgan_faces_tutorial`
+.. customcalloutitem::
+   :description: The 60 min blitz is the most common starting point and provides a broad view on how to use PyTorch. It covers the basics all the way to constructing deep neural networks.
+   :header: New to PyTorch?
+   :button_link: beginner/deep_learning_60min_blitz.html
+   :button_text: Start 60-min blitz

-.. customgalleryitem::
-   :tooltip: (experimental) Static Quantization with Eager Mode in PyTorch
-   :figure: /_static/img/qat.png
-   :description: :doc:`advanced/static_quantization_tutorial`
+.. customcalloutitem::
+   :description: Bite-sized, ready-to-deploy PyTorch code examples.
+   :header: Recipes
+   :button_link: recipes/recipes_index.html
+   :button_text: Explore Recipes

-.. customgalleryitem::
-   :tooltip: Perform quantized transfer learning with feature extractor
-   :description: :doc:`/intermediate/quantized_transfer_learning_tutorial`
-   :figure: /_static/img/quantized_transfer_learning.png
+.. End of callout item section

.. raw:: html

- -Audio ----------------------- - -.. customgalleryitem:: - :figure: /_static/img/audio_preprocessing_tutorial_waveform.png - :tooltip: Preprocessing with torchaudio Tutorial - :description: :doc:`beginner/audio_preprocessing_tutorial` +
+
+ +
+ + + +
+ +
+ +
+
+
+.. Add tutorial cards below this line
+
+.. Learning PyTorch
+
+.. customcarditem::
+   :header: Deep Learning with PyTorch: A 60 Minute Blitz
+   :card_description: Understand PyTorch’s Tensor library and neural networks at a high level.
+   :image: _static/img/thumbnails/pytorch-logo-flat.png
+   :link: beginner/deep_learning_60min_blitz.html
+   :tags: Getting-Started
+
+.. customcarditem::
+   :header: Learning PyTorch with Examples
+   :card_description: This tutorial introduces the fundamental concepts of PyTorch through self-contained examples.
+   :image: _static/img/thumbnails/examples.png
+   :link: beginner/pytorch_with_examples.html
+   :tags: Getting-Started
+
+.. customcarditem::
+   :header: What is torch.nn really?
+   :card_description: Use torch.nn to create and train a neural network.
+   :image: _static/img/torch.nn.png
+   :link: beginner/nn_tutorial.html
+   :tags: Getting-Started
+
+.. customcarditem::
+   :header: Visualizing Models, Data, and Training with TensorBoard
+   :card_description: Learn to use TensorBoard to visualize data and model training.
+   :image: _static/img/thumbnails/pytorch_tensorboard.png
+   :link: intermediate/tensorboard_tutorial.html
+   :tags: Interpretability, Getting-Started, Tensorboard
+
+.. Image/Video
+
+.. customcarditem::
+   :header: TorchVision Object Detection Finetuning Tutorial
+   :card_description: Finetune a pre-trained Mask R-CNN model.
+   :image: _static/img/thumbnails/tv-img.png
+   :link: intermediate/torchvision_tutorial.html
+   :tags: Image/Video
+
+.. customcarditem::
+   :header: Transfer Learning for Computer Vision Tutorial
+   :card_description: Train a convolutional neural network for image classification using transfer learning.
+   :image: _static/img/thumbnails/sphx_glr_transfer_learning_tutorial_001.png
+   :link: beginner/transfer_learning_tutorial.html
+   :tags: Image/Video
+
+.. customcarditem::
+   :header: Adversarial Example Generation
+   :card_description: Raise your awareness to the security vulnerabilities of ML models, and get insight into the hot topic of adversarial machine learning.
+   :image: _static/img/panda.png
+   :link: beginner/fgsm_tutorial.html
+   :tags: Image/Video
+
+.. customcarditem::
+   :header: DCGAN Tutorial
+   :card_description: Train a generative adversarial network (GAN) to generate new celebrities.
+   :image: _static/img/dcgan_generator.png
+   :link: beginner/dcgan_faces_tutorial.html
+   :tags: Image/Video
+
+.. customcarditem::
+   :header: (Experimental) Static Quantization with Eager Mode in PyTorch
+   :card_description: Learn techniques to improve a model's accuracy - post-training static quantization, per-channel quantization, and quantization-aware training.
+   :image: _static/img/qat.png
+   :link: advanced/static_quantization_tutorial.html
+   :tags: Image/Video, Quantization, Model-Optimization
+
+.. customcarditem::
+   :header: (Experimental) Quantized Transfer Learning for Computer Vision Tutorial
+   :card_description: Perform quantized transfer learning with a feature extractor.
+   :image: _static/img/quantized_transfer_learning.png
+   :link: intermediate/quantized_transfer_learning_tutorial.html
+   :tags: Image/Video, Quantization, Model-Optimization
+
+.. Audio
+
+.. customcarditem::
+   :header: torchaudio Tutorial
+   :card_description: Learn to load and preprocess data from a simple dataset with PyTorch's torchaudio library.
+   :image: _static/img/audio_preprocessing_tutorial_waveform.png
+   :link: beginner/audio_preprocessing_tutorial.html
+   :tags: Audio
+
+.. Text
+
+.. customcarditem::
+   :header: Sequence-to-Sequence Modeling with nn.Transformer and torchtext
+   :card_description: Learn how to train a sequence-to-sequence model that uses the nn.Transformer module.
+   :image: _static/img/transformer_architecture.jpg
+   :link: beginner/transformer_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: NLP from Scratch: Classifying Names with a Character-level RNN
+   :card_description: Build and train a basic character-level RNN to classify words from scratch without the use of torchtext. First in a series of three tutorials.
+   :image: _static/img/rnnclass.png
+   :link: intermediate/char_rnn_classification_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: NLP from Scratch: Generating Names with a Character-level RNN
+   :card_description: After using a character-level RNN to classify names, learn how to generate names from languages. Second in a series of three tutorials.
+   :image: _static/img/char_rnn_generation.png
+   :link: intermediate/char_rnn_generation_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: NLP from Scratch: Translation with a Sequence-to-sequence Network and Attention
+   :card_description: This is the third and final tutorial on doing “NLP From Scratch”, where we write our own classes and functions to preprocess the data to do our NLP modeling tasks.
+   :image: _static/img/seq2seq_flat.png
+   :link: intermediate/seq2seq_translation_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: Text Classification with Torchtext
+   :card_description: Train a text classification model with ngram features using the torchtext library.
+   :image: _static/img/text_sentiment_ngrams_model.png
+   :link: beginner/text_sentiment_ngrams_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: Language Translation with Torchtext
+   :card_description: Use torchtext to preprocess data from a well-known dataset containing both English and German. Then use it to train a sequence-to-sequence model.
+   :image: _static/img/thumbnails/german_to_english_translation.png
+   :link: beginner/torchtext_translation_tutorial.html
+   :tags: Text
+
+.. customcarditem::
+   :header: (Experimental) Dynamic Quantization on an LSTM Word Language Model
+   :card_description: Apply dynamic quantization, the easiest form of quantization, to an LSTM-based next word prediction model.
+   :image: _static/img/quant_asym.png
+   :link: advanced/dynamic_quantization_tutorial.html
+   :tags: Text, Quantization, Model-Optimization
+
+.. customcarditem::
+   :header: (Experimental) Dynamic Quantization on BERT
+   :card_description: Apply dynamic quantization to a BERT (Bidirectional Encoder Representations from Transformers) model.
+   :image: _static/img/bert.png
+   :link: intermediate/dynamic_quantization_bert_tutorial.html
+   :tags: Text, Quantization, Model-Optimization
+
+.. Reinforcement Learning
+
+.. customcarditem::
+   :header: Reinforcement Learning (DQN)
+   :card_description: Learn how to use PyTorch to train a Deep Q Learning (DQN) agent on the CartPole-v0 task from the OpenAI Gym.
+   :image: _static/img/cartpole.gif
+   :link: intermediate/reinforcement_q_learning.html
+   :tags: Reinforcement-Learning
+
+.. Additional APIs
+
+.. customcarditem::
+   :header: Using the PyTorch C++ Frontend
+   :card_description: Walk through an end-to-end example of training a model with the C++ frontend by training a DCGAN – a kind of generative model – to generate images of MNIST digits.
+   :image: _static/img/cpp-pytorch.png
+   :link: advanced/cpp_frontend.html
+   :tags: C++
+
+.. customcarditem::
+   :header: (Experimental) Introduction to Named Tensors in PyTorch
+   :card_description: Learn how named tensors let you associate explicit names with tensor dimensions for safer, more readable tensor code.
+   :image: _static/img/named_tensor.png
+   :link: intermediate/named_tensor_tutorial.html
+   :tags: Named-Tensor, Best-Practice
+
+.. customcarditem::
+   :header: Pruning Tutorial
+   :card_description: Learn how to use torch.nn.utils.prune to sparsify your neural networks, and how to extend it to implement your own custom pruning technique.
+   :image: _static/img/pruning.png
+   :link: intermediate/pruning_tutorial.html
+   :tags: Model-Optimization, Best-Practice
+
+.. End of tutorial card section

.. raw:: html

- +
-Text ----------------------- +
-.. customgalleryitem:: - :tooltip: Transformer Tutorial - :figure: /_static/img/transformer_architecture.jpg - :description: :doc:`/beginner/transformer_tutorial` +
-.. customgalleryitem:: - :figure: /_static/img/rnnclass.png - :tooltip: Build and train a basic character-level RNN to classify words - :description: :doc:`intermediate/char_rnn_classification_tutorial` +
-.. customgalleryitem:: - :figure: /_static/img/char_rnn_generation.png - :tooltip: Generate names from languages - :description: :doc:`intermediate/char_rnn_generation_tutorial` +.. .. galleryitem:: beginner/saving_loading_models.py -.. galleryitem:: intermediate/seq2seq_translation_tutorial.py - :figure: _static/img/seq2seq_flat.png - -.. customgalleryitem:: - :tooltip: Sentiment Ngrams with Torchtext - :figure: /_static/img/text_sentiment_ngrams_model.png - :description: :doc:`/beginner/text_sentiment_ngrams_tutorial` - -.. customgalleryitem:: - :tooltip: Language Translation with Torchtext - :figure: /_static/img/thumbnails/german_to_english_translation.png - :description: :doc:`/beginner/torchtext_translation_tutorial` - -.. customgalleryitem:: - :tooltip: Perform dynamic quantization on a pre-trained PyTorch model - :description: :doc:`/advanced/dynamic_quantization_tutorial` - :figure: _static/img/quant_asym.png - -.. customgalleryitem:: - :tooltip: Convert a well-known state-of-the-art model like BERT into dynamic quantized model - :description: :doc:`/intermediate/dynamic_quantization_bert_tutorial` - :figure: /_static/img/bert.png +Additional Resources +============================ .. raw:: html -
- -Reinforcement Learning ----------------------- - -.. customgalleryitem:: - :tooltip: Use PyTorch to train a Deep Q Learning (DQN) agent - :figure: /_static/img/cartpole.gif - :description: :doc:`intermediate/reinforcement_q_learning` +
+
-.. raw:: html +.. Add callout items below this line -
+.. customcalloutitem:: + :header: Examples of PyTorch + :description: A set of examples around pytorch in Vision, Text, Reinforcement Learning, etc. + :button_link: https://github.com/pytorch/examples + :button_text: Check Them Out -Reinforcement Learning ----------------------- +.. customcalloutitem:: + :header: Recipes + :description: Bite-sized, ready-to-deploy PyTorch code examples. + :button_link: recipes/recipes_index.html + :button_text: Explore Recipes -.. customgalleryitem:: - :tooltip: Use PyTorch to train a Deep Q Learning (DQN) agent - :figure: /_static/img/cartpole.gif - :description: :doc:`intermediate/reinforcement_q_learning` +.. customcalloutitem:: + :header: PyTorch Cheat Sheet + :description: Quick overview to essential PyTorch elements. + :button_link: beginner/ptcheat.html + :button_text: Download -.. raw:: html +.. customcalloutitem:: + :header: Tutorials on GitHub + :description: Access PyTorch Tutorials from GitHub. + :button_link: https://github.com/pytorch/tutorials + :button_text: Go To GitHub -
- -Additional APIs ----------------------- - -.. customgalleryitem:: - :tooltip: Using the PyTorch C++ Frontend - :figure: /_static/img/cpp-pytorch.png - :description: :doc:`advanced/cpp_frontend` - -.. customgalleryitem:: - :figure: /_static/img/named_tensor.png - :tooltip: Named Tensor - :description: :doc:`intermediate/named_tensor_tutorial` - -.. customgalleryitem:: - :tooltip: Use pruning to sparsify your neural networks - :description: :doc:`/intermediate/pruning_tutorial` - :figure: _static/img/pruning.png +.. End of callout section .. raw:: html -
+
+
+
.. ----------------------------------------- .. Page TOC .. ----------------------------------------- +.. toctree:: + :maxdepth: 2 + :hidden: + :includehidden: + :caption: Recipes + + recipes/recipes_index + .. toctree:: :maxdepth: 2 :hidden: @@ -265,12 +356,3 @@ Additional APIs advanced/cpp_frontend intermediate/named_tensor_tutorial intermediate/pruning_tutorial - - -.. toctree:: - :maxdepth: 2 - :includehidden: - :hidden: - :caption: Recipes - - recipes/recipes_index diff --git a/intermediate_source/rpc_tutorial.rst b/intermediate_source/rpc_tutorial.rst index 81111607de..cd88393003 100644 --- a/intermediate_source/rpc_tutorial.rst +++ b/intermediate_source/rpc_tutorial.rst @@ -598,19 +598,20 @@ accumulate to the same set of ``Tensors``. # train for 10 iterations for epoch in range(10): - # create distributed autograd context for data, target in get_next_batch(): - with dist_autograd.context(): + # create distributed autograd context + with dist_autograd.context() as context_id: hidden[0].detach_() hidden[1].detach_() output, hidden = model(data, hidden) loss = criterion(output, target) # run distributed backward pass - dist_autograd.backward([loss]) + dist_autograd.backward(context_id, [loss]) # run distributed optimizer - opt.step() - # not necessary to zero grads as each iteration creates a different - # distributed autograd context which hosts different grads + opt.step(context_id) + # not necessary to zero grads since they are + # accumulated into the distributed autograd context + # which is reset every iteration. print("Training epoch {}".format(epoch)) diff --git a/recipes/index.rst b/recipes/index.rst new file mode 100644 index 0000000000..b53f854344 --- /dev/null +++ b/recipes/index.rst @@ -0,0 +1,102 @@ +:orphan: + + + +.. _sphx_glr_recipes: + +Recipes +------------------ +1. recipes/* and recipes_index.rst + PyTorch Recipes + https://pytorch.org/tutorials/recipes/recipes_index.html + + +.. raw:: html + +
+
+
+.. _sphx_glr_recipes_recipes:
+
+PyTorch Recipes
+---------------------------------------------
+
+1. autograd_tutorial.py
+     Autograd: Automatic Differentiation
+     https://pytorch.org/tutorials/recipes/recipes/autograd_tutorial.html
+
+2. example_recipe.py
+     Example Recipe
+     https://pytorch.org/tutorials/recipes/recipes/example_recipe.html
+
+
+
+.. raw:: html
+
+ +.. only:: html + + .. figure:: /recipes/recipes/images/thumb/sphx_glr_example_recipe_thumb.png + + :ref:`sphx_glr_recipes_recipes_example_recipe.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /recipes/recipes/example_recipe + +.. raw:: html + +
+ +.. only:: html + + .. figure:: /recipes/recipes/images/thumb/sphx_glr_autograd_tutorial_thumb.png + + :ref:`sphx_glr_recipes_recipes_autograd_tutorial.py` + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /recipes/recipes/autograd_tutorial +.. raw:: html + +
+ + + +.. only :: html + + .. container:: sphx-glr-footer + :class: sphx-glr-footer-gallery + + + .. container:: sphx-glr-download + + :download:`Download all examples in Python source code: recipes_python.zip ` + + + + .. container:: sphx-glr-download + + :download:`Download all examples in Jupyter notebooks: recipes_jupyter.zip ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/recipes/recipes/autograd_tutorial.ipynb b/recipes/recipes/autograd_tutorial.ipynb new file mode 100644 index 0000000000..e79d40980e --- /dev/null +++ b/recipes/recipes/autograd_tutorial.ipynb @@ -0,0 +1,273 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\nAutograd: Automatic Differentiation\n===================================\n\nCentral to all neural networks in PyTorch is the ``autograd`` package.\nLet\u2019s first briefly visit this, and we will then go to training our\nfirst neural network.\n\n\nThe ``autograd`` package provides automatic differentiation for all operations\non Tensors. It is a define-by-run framework, which means that your backprop is\ndefined by how your code is run, and that every single iteration can be\ndifferent.\n\nLet us see this in more simple terms with some examples.\n\nTensor\n--------\n\n``torch.Tensor`` is the central class of the package. If you set its attribute\n``.requires_grad`` as ``True``, it starts to track all operations on it. When\nyou finish your computation you can call ``.backward()`` and have all the\ngradients computed automatically. The gradient for this tensor will be\naccumulated into ``.grad`` attribute.\n\nTo stop a tensor from tracking history, you can call ``.detach()`` to detach\nit from the computation history, and to prevent future computation from being\ntracked.\n\nTo prevent tracking history (and using memory), you can also wrap the code block\nin ``with torch.no_grad():``. This can be particularly helpful when evaluating a\nmodel because the model may have trainable parameters with\n``requires_grad=True``, but for which we don't need the gradients.\n\nThere\u2019s one more class which is very important for autograd\nimplementation - a ``Function``.\n\n``Tensor`` and ``Function`` are interconnected and build up an acyclic\ngraph, that encodes a complete history of computation. Each tensor has\na ``.grad_fn`` attribute that references a ``Function`` that has created\nthe ``Tensor`` (except for Tensors created by the user - their\n``grad_fn is None``).\n\nIf you want to compute the derivatives, you can call ``.backward()`` on\na ``Tensor``. If ``Tensor`` is a scalar (i.e. 
it holds a one element\ndata), you don\u2019t need to specify any arguments to ``backward()``,\nhowever if it has more elements, you need to specify a ``gradient``\nargument that is a tensor of matching shape.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import torch" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a tensor and set ``requires_grad=True`` to track computation with it\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "x = torch.ones(2, 2, requires_grad=True)\nprint(x)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Do a tensor operation:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "y = x + 2\nprint(y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "``y`` was created as a result of an operation, so it has a ``grad_fn``.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(y.grad_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Do more operations on ``y``\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "z = y * y * 3\nout = z.mean()\n\nprint(z, out)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``\nflag in-place. The input flag defaults to ``False`` if not given.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "a = torch.randn(2, 2)\na = ((a * 3) / (a - 1))\nprint(a.requires_grad)\na.requires_grad_(True)\nprint(a.requires_grad)\nb = (a * a).sum()\nprint(b.grad_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Gradients\n---------\nLet's backprop now.\nBecause ``out`` contains a single scalar, ``out.backward()`` is\nequivalent to ``out.backward(torch.tensor(1.))``.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "out.backward()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print gradients d(out)/dx\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(x.grad)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should have got a matrix of ``4.5``. 
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Mathematically, if you have a vector-valued function $\\vec{y}=f(\\vec{x})$,\nthen the gradient of $\\vec{y}$ with respect to $\\vec{x}$\nis a Jacobian matrix:\n\n\\begin{align}J=\\left(\\begin{array}{ccc}\n \\frac{\\partial y_{1}}{\\partial x_{1}} & \\cdots & \\frac{\\partial y_{1}}{\\partial x_{n}}\\\\\n \\vdots & \\ddots & \\vdots\\\\\n \\frac{\\partial y_{m}}{\\partial x_{1}} & \\cdots & \\frac{\\partial y_{m}}{\\partial x_{n}}\n \\end{array}\\right)\\end{align}\n\nGenerally speaking, ``torch.autograd`` is an engine for computing\nvector-Jacobian products. That is, given any vector\n$v=\\left(\\begin{array}{cccc} v_{1} & v_{2} & \\cdots & v_{m}\\end{array}\\right)^{T}$,\ncompute the product $v^{T}\\cdot J$. If $v$ happens to be\nthe gradient of a scalar function $l=g\\left(\\vec{y}\\right)$,\nthat is,\n$v=\\left(\\begin{array}{ccc}\\frac{\\partial l}{\\partial y_{1}} & \\cdots & \\frac{\\partial l}{\\partial y_{m}}\\end{array}\\right)^{T}$,\nthen by the chain rule, the vector-Jacobian product would be the\ngradient of $l$ with respect to $\\vec{x}$:\n\n\\begin{align}J^{T}\\cdot v=\\left(\\begin{array}{ccc}\n \\frac{\\partial y_{1}}{\\partial x_{1}} & \\cdots & \\frac{\\partial y_{m}}{\\partial x_{1}}\\\\\n \\vdots & \\ddots & \\vdots\\\\\n \\frac{\\partial y_{1}}{\\partial x_{n}} & \\cdots & \\frac{\\partial y_{m}}{\\partial x_{n}}\n \\end{array}\\right)\\left(\\begin{array}{c}\n \\frac{\\partial l}{\\partial y_{1}}\\\\\n \\vdots\\\\\n \\frac{\\partial l}{\\partial y_{m}}\n \\end{array}\\right)=\\left(\\begin{array}{c}\n \\frac{\\partial l}{\\partial x_{1}}\\\\\n \\vdots\\\\\n \\frac{\\partial l}{\\partial x_{n}}\n \\end{array}\\right)\\end{align}\n\n(Note that $v^{T}\\cdot J$ gives a row vector which can be\ntreated as a column vector by taking $J^{T}\\cdot v$.)\n\nThis characteristic of the vector-Jacobian product makes it very\nconvenient to feed external gradients into a model that has a\nnon-scalar output.\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now let's take a look at an example of a vector-Jacobian product:\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "x = torch.randn(3, requires_grad=True)\n\ny = x * 2\nwhile y.data.norm() < 1000:\n    y = y * 2\n\nprint(y)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now in this case ``y`` is no longer a scalar. ``torch.autograd``\ncannot compute the full Jacobian directly, but if we just\nwant the vector-Jacobian product, simply pass the vector to\n``backward`` as an argument:\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)\ny.backward(v)\n\nprint(x.grad)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "You can also stop autograd from tracking history on Tensors\nwith ``.requires_grad=True``, either by wrapping the code block in\n``with torch.no_grad():``\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "print(x.requires_grad)\nprint((x ** 2).requires_grad)\n\nwith torch.no_grad():\n    print((x ** 2).requires_grad)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Or by using ``.detach()`` to get a new Tensor with the same\ncontent but that does not require gradients:\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "print(x.requires_grad)\ny = x.detach()\nprint(y.requires_grad)\nprint(x.eq(y).all())"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "**Read Later:**\n\nDocumentation for ``autograd.Function`` is at\nhttps://pytorch.org/docs/stable/autograd.html#function\n\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.7.4"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/recipes/recipes/autograd_tutorial.py b/recipes/recipes/autograd_tutorial.py
new file mode 100644
index 0000000000..98e70a251d
--- /dev/null
+++ b/recipes/recipes/autograd_tutorial.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+"""
+Autograd: Automatic Differentiation
+===================================
+
+Central to all neural networks in PyTorch is the ``autograd`` package.
+Let’s first briefly visit it, and we will then go on to training our
+first neural network.
+
+
+The ``autograd`` package provides automatic differentiation for all operations
+on Tensors. It is a define-by-run framework, which means that your backprop is
+defined by how your code is run, and that every single iteration can be
+different.
+
+Let us see this in simpler terms with some examples.
+
+Tensor
+--------
+
+``torch.Tensor`` is the central class of the package. If you set its attribute
+``.requires_grad`` to ``True``, it starts to track all operations on it. When
+you finish your computation you can call ``.backward()`` and have all the
+gradients computed automatically. The gradient for this tensor will be
+accumulated into the ``.grad`` attribute.
+
+To stop a tensor from tracking history, you can call ``.detach()`` to detach
+it from the computation history and to prevent future computation from being
+tracked.
+
+To prevent tracking history (and using memory), you can also wrap the code block
+in ``with torch.no_grad():``. This can be particularly helpful when evaluating a
+model, because the model may have trainable parameters with
+``requires_grad=True`` for which we don't need the gradients.
+
+There’s one more class which is very important for the autograd
+implementation: a ``Function``.
+
+``Tensor`` and ``Function`` are interconnected and build up an acyclic
+graph that encodes a complete history of computation. Each tensor has
+a ``.grad_fn`` attribute that references the ``Function`` that created
+the ``Tensor`` (except for tensors created by the user; their
+``grad_fn is None``).
+
+If you want to compute the derivatives, you can call ``.backward()`` on
+a ``Tensor``. If the ``Tensor`` is a scalar (i.e. it holds one element
+of data), you don’t need to specify any arguments to ``backward()``;
+however, if it has more elements, you need to specify a ``gradient``
+argument that is a tensor of matching shape.
+"""
+
+import torch
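+
+###############################################################
+# As a quick check of the claim above (an illustrative aside; ``a0`` is
+# just a throwaway name): a tensor created by the user has
+# ``grad_fn is None``, while the result of an operation records a ``Function``.
+a0 = torch.ones(2, requires_grad=True)  # created by the user
+print(a0.grad_fn)        # None
+print((a0 * 2).grad_fn)  # a backward Function recorded by autograd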
+
+###############################################################
+# Create a tensor and set ``requires_grad=True`` to track computation with it:
+x = torch.ones(2, 2, requires_grad=True)
+print(x)
+
+###############################################################
+# Do a tensor operation:
+y = x + 2
+print(y)
+
+###############################################################
+# ``y`` was created as a result of an operation, so it has a ``grad_fn``.
+print(y.grad_fn)
+
+###############################################################
+# Do more operations on ``y``:
+z = y * y * 3
+out = z.mean()
+
+print(z, out)
+
+################################################################
+# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
+# flag in-place. The input flag defaults to ``False`` if not given.
+a = torch.randn(2, 2)
+a = ((a * 3) / (a - 1))
+print(a.requires_grad)
+a.requires_grad_(True)
+print(a.requires_grad)
+b = (a * a).sum()
+print(b.grad_fn)
+
+###############################################################
+# Gradients
+# ---------
+# Let's backprop now.
+# Because ``out`` contains a single scalar, ``out.backward()`` is
+# equivalent to ``out.backward(torch.tensor(1.))``.
+
+out.backward()
+
+###############################################################
+# Print gradients d(out)/dx
+#
+
+print(x.grad)
+
+###############################################################
+# You should have gotten a matrix of ``4.5``. Let’s call the ``out``
+# *Tensor* “:math:`o`”.
+# We have that :math:`o = \frac{1}{4}\sum_i z_i`,
+# :math:`z_i = 3(x_i+2)^2` and :math:`z_i\bigr\rvert_{x_i=1} = 27`.
+# Therefore,
+# :math:`\frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)`, hence
+# :math:`\frac{\partial o}{\partial x_i}\bigr\rvert_{x_i=1} = \frac{9}{2} = 4.5`.
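+
+###############################################################
+# Gradients accumulate into ``.grad`` on every call to ``.backward()``.
+# A minimal sketch (``out2`` is just an illustrative name): a second
+# backward pass adds to ``x.grad``, and ``x.grad.zero_()`` resets it.
+out2 = x.sum()
+out2.backward()
+print(x.grad)   # 1.0 was added on top of the previous 4.5 values
+x.grad.zero_()  # reset the accumulated gradients in-place
+print(x.grad)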
+
+###############################################################
+# Mathematically, if you have a vector-valued function :math:`\vec{y}=f(\vec{x})`,
+# then the gradient of :math:`\vec{y}` with respect to :math:`\vec{x}`
+# is a Jacobian matrix:
+#
+# .. math::
+#   J=\left(\begin{array}{ccc}
+#    \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\
+#    \vdots & \ddots & \vdots\\
+#    \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
+#    \end{array}\right)
+#
+# Generally speaking, ``torch.autograd`` is an engine for computing
+# vector-Jacobian products. That is, given any vector
+# :math:`v=\left(\begin{array}{cccc} v_{1} & v_{2} & \cdots & v_{m}\end{array}\right)^{T}`,
+# compute the product :math:`v^{T}\cdot J`. If :math:`v` happens to be
+# the gradient of a scalar function :math:`l=g\left(\vec{y}\right)`,
+# that is,
+# :math:`v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}`,
+# then by the chain rule, the vector-Jacobian product would be the
+# gradient of :math:`l` with respect to :math:`\vec{x}`:
+#
+# .. math::
+#   J^{T}\cdot v=\left(\begin{array}{ccc}
+#    \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\
+#    \vdots & \ddots & \vdots\\
+#    \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
+#    \end{array}\right)\left(\begin{array}{c}
+#    \frac{\partial l}{\partial y_{1}}\\
+#    \vdots\\
+#    \frac{\partial l}{\partial y_{m}}
+#    \end{array}\right)=\left(\begin{array}{c}
+#    \frac{\partial l}{\partial x_{1}}\\
+#    \vdots\\
+#    \frac{\partial l}{\partial x_{n}}
+#    \end{array}\right)
+#
+# (Note that :math:`v^{T}\cdot J` gives a row vector which can be
+# treated as a column vector by taking :math:`J^{T}\cdot v`.)
+#
+# This characteristic of the vector-Jacobian product makes it very
+# convenient to feed external gradients into a model that has a
+# non-scalar output.

+###############################################################
+# Now let's take a look at an example of a vector-Jacobian product:
+
+x = torch.randn(3, requires_grad=True)
+
+y = x * 2
+while y.data.norm() < 1000:
+    y = y * 2
+
+print(y)
+
+###############################################################
+# Now in this case ``y`` is no longer a scalar. ``torch.autograd``
+# cannot compute the full Jacobian directly, but if we just
+# want the vector-Jacobian product, simply pass the vector to
+# ``backward`` as an argument:
+v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
+y.backward(v)
+
+print(x.grad)
+
+###############################################################
+# You can also stop autograd from tracking history on Tensors
+# with ``.requires_grad=True``, either by wrapping the code block in
+# ``with torch.no_grad():``
+print(x.requires_grad)
+print((x ** 2).requires_grad)
+
+with torch.no_grad():
+    print((x ** 2).requires_grad)
+
+###############################################################
+# Or by using ``.detach()`` to get a new Tensor with the same
+# content but that does not require gradients:
+print(x.requires_grad)
+y = x.detach()
+print(y.requires_grad)
+print(x.eq(y).all())
+
+
+###############################################################
+# **Read Later:**
+#
+# Documentation for ``autograd.Function`` is at
+# https://pytorch.org/docs/stable/autograd.html#function
diff --git a/recipes/recipes/autograd_tutorial.rst b/recipes/recipes/autograd_tutorial.rst
new file mode 100644
index 0000000000..900cdf78ba
--- /dev/null
+++ b/recipes/recipes/autograd_tutorial.rst
@@ -0,0 +1,282 @@
+.. note::
+    :class: sphx-glr-download-link-note
+
+    Click :ref:`here ` to download the full example code
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_recipes_recipes_autograd_tutorial.py:
+
+
+Autograd: Automatic Differentiation
+===================================
+
+Central to all neural networks in PyTorch is the ``autograd`` package.
+Let’s first briefly visit it, and we will then go on to training our
+first neural network.
+
+
+The ``autograd`` package provides automatic differentiation for all operations
+on Tensors. It is a define-by-run framework, which means that your backprop is
+defined by how your code is run, and that every single iteration can be
+different.
+
+Let us see this in simpler terms with some examples.
+
+Tensor
+--------
+
+``torch.Tensor`` is the central class of the package. If you set its attribute
+``.requires_grad`` to ``True``, it starts to track all operations on it. When
+you finish your computation you can call ``.backward()`` and have all the
+gradients computed automatically. The gradient for this tensor will be
+accumulated into the ``.grad`` attribute.
+
+To stop a tensor from tracking history, you can call ``.detach()`` to detach
+it from the computation history and to prevent future computation from being
+tracked.
+
+To prevent tracking history (and using memory), you can also wrap the code block
+in ``with torch.no_grad():``. This can be particularly helpful when evaluating a
+model, because the model may have trainable parameters with
+``requires_grad=True`` for which we don't need the gradients.
+
+There’s one more class which is very important for the autograd
+implementation: a ``Function``.
+
+``Tensor`` and ``Function`` are interconnected and build up an acyclic
+graph that encodes a complete history of computation. Each tensor has
+a ``.grad_fn`` attribute that references the ``Function`` that created
+the ``Tensor`` (except for tensors created by the user; their
+``grad_fn is None``).
+
+If you want to compute the derivatives, you can call ``.backward()`` on
+a ``Tensor``. If the ``Tensor`` is a scalar (i.e. it holds one element
+of data), you don’t need to specify any arguments to ``backward()``;
+however, if it has more elements, you need to specify a ``gradient``
+argument that is a tensor of matching shape.
+
+.. code-block:: default
+
+
+    import torch
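+
+
+As a quick check of the claim above (an illustrative aside; ``a0`` is just a
+throwaway name): a tensor created by the user has ``grad_fn is None``, while
+the result of an operation records a ``Function``.
+
+
+.. code-block:: default
+
+    a0 = torch.ones(2, requires_grad=True)  # created by the user
+    print(a0.grad_fn)        # None
+    print((a0 * 2).grad_fn)  # a backward Function recorded by autograd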
+
+
+Create a tensor and set ``requires_grad=True`` to track computation with it:
+
+
+.. code-block:: default
+
+    x = torch.ones(2, 2, requires_grad=True)
+    print(x)
+
+
+Do a tensor operation:
+
+
+.. code-block:: default
+
+    y = x + 2
+    print(y)
+
+
+``y`` was created as a result of an operation, so it has a ``grad_fn``.
+
+
+.. code-block:: default
+
+    print(y.grad_fn)
+
+
+Do more operations on ``y``:
+
+
+.. code-block:: default
+
+    z = y * y * 3
+    out = z.mean()
+
+    print(z, out)
+
+
+``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
+flag in-place. The input flag defaults to ``False`` if not given.
+
+
+.. code-block:: default
+
+    a = torch.randn(2, 2)
+    a = ((a * 3) / (a - 1))
+    print(a.requires_grad)
+    a.requires_grad_(True)
+    print(a.requires_grad)
+    b = (a * a).sum()
+    print(b.grad_fn)
+
+
+Gradients
+---------
+Let's backprop now.
+Because ``out`` contains a single scalar, ``out.backward()`` is
+equivalent to ``out.backward(torch.tensor(1.))``.
+
+
+.. code-block:: default
+
+
+    out.backward()
+
+
+Print gradients d(out)/dx
+
+
+
+.. code-block:: default
+
+
+    print(x.grad)
+
+
+You should have gotten a matrix of ``4.5``. Let’s call the ``out``
+*Tensor* “:math:`o`”.
+We have that :math:`o = \frac{1}{4}\sum_i z_i`,
+:math:`z_i = 3(x_i+2)^2` and :math:`z_i\bigr\rvert_{x_i=1} = 27`.
+Therefore,
+:math:`\frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)`, hence
+:math:`\frac{\partial o}{\partial x_i}\bigr\rvert_{x_i=1} = \frac{9}{2} = 4.5`.
+
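+Gradients accumulate into ``.grad`` on every call to ``.backward()``.
+A minimal sketch (``out2`` is just an illustrative name): a second
+backward pass adds to ``x.grad``, and ``x.grad.zero_()`` resets it.
+
+
+.. code-block:: default
+
+    out2 = x.sum()
+    out2.backward()
+    print(x.grad)   # 1.0 was added on top of the previous 4.5 values
+    x.grad.zero_()  # reset the accumulated gradients in-place
+    print(x.grad)
+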
+Mathematically, if you have a vector-valued function :math:`\vec{y}=f(\vec{x})`,
+then the gradient of :math:`\vec{y}` with respect to :math:`\vec{x}`
+is a Jacobian matrix:
+
+.. math::
+  J=\left(\begin{array}{ccc}
+   \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\
+   \vdots & \ddots & \vdots\\
+   \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
+   \end{array}\right)
+
+Generally speaking, ``torch.autograd`` is an engine for computing
+vector-Jacobian products. That is, given any vector
+:math:`v=\left(\begin{array}{cccc} v_{1} & v_{2} & \cdots & v_{m}\end{array}\right)^{T}`,
+compute the product :math:`v^{T}\cdot J`. If :math:`v` happens to be
+the gradient of a scalar function :math:`l=g\left(\vec{y}\right)`,
+that is,
+:math:`v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}`,
+then by the chain rule, the vector-Jacobian product would be the
+gradient of :math:`l` with respect to :math:`\vec{x}`:
+
+.. math::
+  J^{T}\cdot v=\left(\begin{array}{ccc}
+   \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\
+   \vdots & \ddots & \vdots\\
+   \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
+   \end{array}\right)\left(\begin{array}{c}
+   \frac{\partial l}{\partial y_{1}}\\
+   \vdots\\
+   \frac{\partial l}{\partial y_{m}}
+   \end{array}\right)=\left(\begin{array}{c}
+   \frac{\partial l}{\partial x_{1}}\\
+   \vdots\\
+   \frac{\partial l}{\partial x_{n}}
+   \end{array}\right)
+
+(Note that :math:`v^{T}\cdot J` gives a row vector which can be
+treated as a column vector by taking :math:`J^{T}\cdot v`.)
+
+This characteristic of the vector-Jacobian product makes it very
+convenient to feed external gradients into a model that has a
+non-scalar output.
+
+Now let's take a look at an example of a vector-Jacobian product:
+
+
+.. code-block:: default
+
+
+    x = torch.randn(3, requires_grad=True)
+
+    y = x * 2
+    while y.data.norm() < 1000:
+        y = y * 2
+
+    print(y)
+
+
+Now in this case ``y`` is no longer a scalar. ``torch.autograd``
+cannot compute the full Jacobian directly, but if we just
+want the vector-Jacobian product, simply pass the vector to
+``backward`` as an argument:
+
+
+.. code-block:: default
+
+    v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
+    y.backward(v)
+
+    print(x.grad)
+
+
+You can also stop autograd from tracking history on Tensors
+with ``.requires_grad=True``, either by wrapping the code block in
+``with torch.no_grad():``
+
+
+.. code-block:: default
+
+    print(x.requires_grad)
+    print((x ** 2).requires_grad)
+
+    with torch.no_grad():
+        print((x ** 2).requires_grad)
+
+
+Or by using ``.detach()`` to get a new Tensor with the same
+content but that does not require gradients:
+
+
+.. code-block:: default
+
+    print(x.requires_grad)
+    y = x.detach()
+    print(y.requires_grad)
+    print(x.eq(y).all())
+
+
+
+**Read Later:**
+
+Documentation for ``autograd.Function`` is at
+https://pytorch.org/docs/stable/autograd.html#function
+
+
+.. rst-class:: sphx-glr-timing
+
+   **Total running time of the script:** ( 0 minutes 0.000 seconds)
+
+
+.. _sphx_glr_download_recipes_recipes_autograd_tutorial.py:
+
+
+.. only:: html
+
+ .. container:: sphx-glr-footer
+    :class: sphx-glr-footer-example
+
+
+
+  .. container:: sphx-glr-download
+
+     :download:`Download Python source code: autograd_tutorial.py `
+
+
+
+  .. container:: sphx-glr-download
+
+     :download:`Download Jupyter notebook: autograd_tutorial.ipynb `
+
+
+.. only:: html
+
+ .. rst-class:: sphx-glr-signature
+
+    `Gallery generated by Sphinx-Gallery `_
diff --git a/recipes/recipes/example_recipe.ipynb b/recipes/recipes/example_recipe.ipynb
new file mode 100644
index 0000000000..809b0c4362
--- /dev/null
+++ b/recipes/recipes/example_recipe.ipynb
@@ -0,0 +1,86 @@
+{
+  "cells": [
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "%matplotlib inline"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "\nTODO: Add Recipe Title\n=======================\n\nTODO: \n * Include 1-2 sentences summing up what the user can expect from the recipe.\n * For example - \u201cThis sample demonstrates how to...\u201d\n\nIntroduction\n--------------\nTODO: \n * Explain why this topic is important.\n * Ex: Provide a summary of how Integrated Gradients works and how you will teach users to implement it using Captum in this tutorial\n\nSetup\n----------------------\nTODO: \n * Call out any required setup or data downloads\n\n\nTODO: List Steps\n-----------------\nTODO: \n * Use the steps you introduced in the Learning Objectives\n * Break down the steps as well as add prose for context\n * Add comments in the code to help clarify for readers what each section is doing\n * Link back to relevant PyTorch documentation\n * Think of it akin to creating a really practical Medium post\n\nTIPS: \n * To denote a word or phrase as code, enclose it in double backticks (``), e.g. ``torch.Tensor``\n * You can **bold** or *italicize* text for emphasis. \n * Add Python code directly in the file. The output will render and build on the site in a separate code block. \n   Below is an example of Python code with comments. \n   You can build this Python file to see the resulting html by following the README.md at github.com/pytorch/tutorials\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "import torch"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Because of the line of pound-sign delimiters above, this comment will show up as plain text between the code blocks.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "x = torch.ones(2, 2, requires_grad=True)\n# Since this is a single-line comment, it will show up as a comment in the code block\nprint(x)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        ".. Note::\n\n   You can add notes using this syntax\n\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Learn More\n----------------------------\nTODO:\n * Link to any additional resources (e.g. Docs, other Tutorials, external resources) if readers want to learn more\n * There are different ways to add hyperlinks - \n * For example, pasting the URL works: Read more about the ``autograd.Function`` at https://pytorch.org/docs/stable/autograd.html#function. \n * Or link to other files in this repository by their titles, such as :doc:`data_parallel_tutorial`.\n * There are also ways to add internal and external links. Check out this resource for more tips: https://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html#id4\n\n\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.7.4"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/recipes/recipes/example_recipe.py b/recipes/recipes/example_recipe.py
new file mode 100644
index 0000000000..9dd3bb199b
--- /dev/null
+++ b/recipes/recipes/example_recipe.py
@@ -0,0 +1,65 @@
+"""
+TODO: Add Recipe Title
+=======================
+
+TODO:
+ * Include 1-2 sentences summing up what the user can expect from the recipe.
+ * For example - “This sample demonstrates how to...”
+
+Introduction
+--------------
+TODO:
+ * Explain why this topic is important.
+ * Ex: Provide a summary of how Integrated Gradients works and how you will teach users to implement it using Captum in this tutorial
+
+Setup
+----------------------
+TODO:
+ * Call out any required setup or data downloads
+
+
+TODO: List Steps
+-----------------
+TODO:
+ * Use the steps you introduced in the Learning Objectives
+ * Break down the steps as well as add prose for context
+ * Add comments in the code to help clarify for readers what each section is doing
+ * Link back to relevant PyTorch documentation
+ * Think of it akin to creating a really practical Medium post
+
+TIPS:
+ * To denote a word or phrase as code, enclose it in double backticks (``), e.g. ``torch.Tensor``
+ * You can **bold** or *italicize* text for emphasis.
+ * Add Python code directly in the file. The output will render and build on the site in a separate code block.
+   Below is an example of Python code with comments.
+   You can build this Python file to see the resulting html by following the README.md at github.com/pytorch/tutorials
+"""

+import torch
+
+###############################################################
+# Because of the line of pound-sign delimiters above, this comment will show up as plain text between the code blocks.
+x = torch.ones(2, 2, requires_grad=True)
+# Since this is a single-line comment, it will show up as a comment in the code block
+print(x)
+
+
+
+###############################################################
+# .. Note::
+#
+#    You can add notes using this syntax
+
+
+
+
+########################################################################
+# Learn More
+# ----------------------------
+# TODO:
+# * Link to any additional resources (e.g. Docs, other Tutorials, external resources) if readers want to learn more
+# * There are different ways to add hyperlinks -
+# * For example, pasting the URL works: Read more about the ``autograd.Function`` at https://pytorch.org/docs/stable/autograd.html#function.
+# * Or link to other files in this repository by their titles, such as :doc:`data_parallel_tutorial`.
+# * There are also ways to add internal and external links. Check out this resource for more tips: https://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html#id4
+#
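+# For instance, an explicit external link in reStructuredText can be written
+# as shown below (an illustrative sketch of the standard rst link syntax):
+#
+#   `PyTorch autograd docs <https://pytorch.org/docs/stable/autograd.html>`_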
diff --git a/recipes/recipes/example_recipe.rst b/recipes/recipes/example_recipe.rst
new file mode 100644
index 0000000000..1ff9e6f18e
--- /dev/null
+++ b/recipes/recipes/example_recipe.rst
@@ -0,0 +1,108 @@
+.. note::
+    :class: sphx-glr-download-link-note
+
+    Click :ref:`here ` to download the full example code
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_recipes_recipes_example_recipe.py:
+
+
+TODO: Add Recipe Title
+=======================
+
+TODO:
+ * Include 1-2 sentences summing up what the user can expect from the recipe.
+ * For example - “This sample demonstrates how to...”
+
+Introduction
+--------------
+TODO:
+ * Explain why this topic is important.
+ * Ex: Provide a summary of how Integrated Gradients works and how you will teach users to implement it using Captum in this tutorial
+
+Setup
+----------------------
+TODO:
+ * Call out any required setup or data downloads
+
+
+TODO: List Steps
+-----------------
+TODO:
+ * Use the steps you introduced in the Learning Objectives
+ * Break down the steps as well as add prose for context
+ * Add comments in the code to help clarify for readers what each section is doing
+ * Link back to relevant PyTorch documentation
+ * Think of it akin to creating a really practical Medium post
+
+TIPS:
+ * To denote a word or phrase as code, enclose it in double backticks (``), e.g. ``torch.Tensor``
+ * You can **bold** or *italicize* text for emphasis.
+ * Add Python code directly in the file. The output will render and build on the site in a separate code block.
+   Below is an example of Python code with comments.
+   You can build this Python file to see the resulting html by following the README.md at github.com/pytorch/tutorials
+
+.. code-block:: default
+
+
+    import torch
+
+
+Because of the line of pound-sign delimiters above, this comment will show up as plain text between the code blocks.
+
+
+.. code-block:: default
+
+    x = torch.ones(2, 2, requires_grad=True)
+    # Since this is a single-line comment, it will show up as a comment in the code block
+    print(x)
+
+
+
+
+.. Note::
+
+    You can add notes using this syntax
+
+Learn More
+----------------------------
+TODO:
+ * Link to any additional resources (e.g. Docs, other Tutorials, external resources) if readers want to learn more
+ * There are different ways to add hyperlinks -
+ * For example, pasting the URL works: Read more about the ``autograd.Function`` at https://pytorch.org/docs/stable/autograd.html#function.
+ * Or link to other files in this repository by their titles, such as :doc:`data_parallel_tutorial`.
+ * There are also ways to add internal and external links. Check out this resource for more tips: https://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html#id4
+
+
+
+.. rst-class:: sphx-glr-timing
+
+   **Total running time of the script:** ( 0 minutes 0.000 seconds)
+
+
+.. _sphx_glr_download_recipes_recipes_example_recipe.py:
+
+
+.. only:: html
+
+ .. container:: sphx-glr-footer
+    :class: sphx-glr-footer-example
+
+
+
+  .. container:: sphx-glr-download
+
+     :download:`Download Python source code: example_recipe.py `
+
+
+
+  .. container:: sphx-glr-download
+
+     :download:`Download Jupyter notebook: example_recipe.ipynb `
+
+
+.. only:: html
+
+ .. rst-class:: sphx-glr-signature
+
+    `Gallery generated by Sphinx-Gallery `_
diff --git a/recipes/recipes/images/thumb/sphx_glr_autograd_tutorial_thumb.png b/recipes/recipes/images/thumb/sphx_glr_autograd_tutorial_thumb.png
new file mode 100644
index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc
GIT binary patch
(binary data for the gallery thumbnail image omitted)

diff --git a/recipes/recipes/images/thumb/sphx_glr_example_recipe_thumb.png b/recipes/recipes/images/thumb/sphx_glr_example_recipe_thumb.png
new file mode 100644
index 0000000000000000000000000000000000000000..233f8e605efca4bef384a7c603d53fdc385428bc
GIT binary patch
(binary data for the gallery thumbnail image omitted)
z7111xO?~^6SILg!M=OsC1tW7isCmc7@_*}8M1r1%!6wtI-UJX&Q^=LCd>=Xf_UsMQn z#vVf4UM=uRr?{RFQ&ZEM8K+Fk*8Azxfr*S{*V{RbN~t;72A`#WesUMgR$6a%Bz!$k z3jbZQ2*XE6N4Gw)y3fo{;Uve^{)XcIAkc9j8iMncHwx1E6U2+S!zv?%Z`>vX3-guU z0${Pka+YFg+=oMiE?&;E-^CU79`@{X1V>+ZDeI^hMWo8lbiVD?4BnR}efM6dwOfpr z+S%%Y6U{wP7S5K&{Q_6Qu`$XfzxCBc+75T@N`Pk>eJAa@mAWeeLfRM=nR^FU-K&L70ox%e{sB#P&D!-xsC!!&Vjku`M=GZ2&+6*1OO0WN`O6`3xB$$$1Cc~P2 zZvZ;-`G{OcKuAXMls2ap51--}H zptl=EM4@mcj3RV|IXagPYshgsecOeafP}%}Cm~IPPm#d@psNUyXWi6B8>f>TmgMa^RW>1~ANq%#_s5CR)dF%~< zcO1UgMsi51$+gZT=SRN2k8idb5?$(}cbM=8&ani~!zHyv=6UaXg45G;OO4b2RIqR3 zbEUZpy6gV?+TKX@S&O$|if+4#->4=28#C?pf8t1!3l9ioLdm+cWGu?2zRDwc#OQqx+1Hb zCr6{jo8`XhhD;q^A^$0hw_w*w2D=c3K)pQkH`G)#$8vFzl-m9kxau?lN;K`2=BmGO zFGOPQq6Be=)KDNVj7cyoy9_qD`5e2RALME3V4R3=p}~g@?2m|uvR%RfCSIr=j|IhWUVUG z4wE|M3M86pgDvJ86c3lRQwM{G$Ny{W^`aR{zedy<`N3!*R9gliJdw?Tjb2Z4ql&uQ zl3awrju8-VkB)Z3mQ_>}g@af{#9Seqn5dsNL`k^_c+qh?N+hOjZTVGk^U)chEfSy% z)kMo3uuxfx!b86KEK3IF=INbk(wdBp`fjnRAJK@`xEa923uB$G|w_cZ!>&4{?@TjBYG5cFY z^l5PobrAswMhKX3G2dc9?Kl0u69Bz~G&T6zxh3RA1rjefirxV7%{2_npzkHfk3w&K zn*V5@Rn7(a7#k);4QlN8w9d$y!OfYmfuCJoE2K4+GnI`57*T&gDq9j5s!~a zI0mo(T=wU1FJG8G8Ho%EOvg_1a@1@mJ--F%c2~H2eXOeJP=G$K(RiVLbm;O-M?6F# z{Y>v6J#76KYi;amLV!EO9tp2JE|1*uVhl;3OJ}psuMjMm{Pue3(9BU_RvA<+cj@u> zP1*!GrIoN6)=aS^Q}&{?ItPPol0yLgc_z`h^oShF z_~v)HFtZF#@fkiA(in}sR*lz?L9qW0+0(*!tWYenV5 z?oUr8^11yk9puWv;pR`Q%gjMKq(Bu_1-%+=S(2ycOeC$-)#}`V+G%OUZ{~f)T#5hU z`Cv&bg>^du(V-#bPxF}JTl?Qu!T83flX|?!Vjk^-+F}O?&BZyfyxAz(%+OvA` zCfM1z#6%52Li|V!v;t4J*XVsfG{BFG_0prMHSt>XRFUfxaDTV(ThpYzA%V8(T5UZc zDv)EFXbU@FIrc5LOkV&mtW4+u%GgZh=&8}S#z5<&G4@DOcf&NbdZ?pevs)-7rg!ks ztEi(QqaR%kEaUtPi4(9tJMC$&zIE{@sUKJkEuASY5R-qpk;WyH= z;()&U0O1sM2M^6yT~T3E{i0b(K&1MN&sYJVg$vuSSeNh4@VDlET0&|fZ-?qS6b(oi z@4yEBlcN*m$!CHLDt>$ssx6C^TjdhlT#?~6wN6%f?AK6k^%DISF{4!>-#rd8GCvOK zMMNVtA;nO)E&hn9@-UJ2<ie;$?sEKP9+TSLG|18a?+l_+Qds01{PN?(6kf_+4veh)cfwX{4% zhT3a6vqe?MS#7#WUoV*^9)4y*G^lV7iR(C^&sBes8Fp`ak`ppcaSO5aB67s`MulPC zOZTEr$0iI;T?tAeaV{hb3w=*^3x0D!6{{wIjM3>1+PXNx87ps=Qg|5N#3KmBpZLn` z{X}`aAfkf+Mg#-ln8&7wM{KYxfN(=3yh|!Aa$T4&J;N?7ped%^cEZoBlB1^U+-LYL8kn@YvEgJfH7g{7XYu zLJ19&86g%gCtGvlJVH3tArUCkCsBV(m9^W=&_kXRENdTn+2x4u69>>ta%z5+AVl53cg*0hPswpCm$ z3|boDuK$EWR~Us!YHm5LJ^u7)wH`EudtoeVi7(u9#x`5xNBQ5B(|4NTS>QbeE?>fI|Y1lOJI)Igu zqRZV?;IV^6p0db*q^q8E8%EV`If3m+T8FE|)_H3I-}ss3(bw{2qFl-l8dO5xo{iD* z-y-^-Dazi!mn?O8OK`u~%{^hKdo5UF%XcjiAO%8o3nf>Hb-`=$dJV6#J6E%zfko7A zr)D>rfa*)iV&6b@K%2V2D`L0Ple@R(^s287W$%Jpx@@5fW?DC#@{Duiz9yXV)nKr| zzPja>>??$n}?mOGPg2)K1~L#|$b=M3e}*3z&@|NhH_lz_I?6{e2hvekd%51f5F@ z>y@LD%7*8+??1hzM4_WmCWOZUh*2*&rbk7W4T(=$GRFdx__%)Ux3UHPd(r0u-NW*C zdj#pCCdTDEbI~$E{bQ0_J^q+_DzB)1-Ti)&8~=OscU2ic3b$G}O1ywi#@n7-wB5C} zEK^LCL`Y01p&yM=EDT^HCyqc9FP!d&z`AuZT9qH_bZ=wMp@``X?Hf*@)K@x&uCpJpMD z)9qL%Lc^%;Xm7plwKG`D8|yV03JH>*CT17S-K8L9lx$`JcEJnP_yGWc(Sut*1IR%T z`o&DNr*?_s{-?uzPi}6;=!TuZYH);1xwyPm4b4)y=OLPt^G=vK&W89AzyxwQO0d z2~K70E%=7GNDqGSRpvSNZ#BNpf!MguMv(sT;DWL7T5DjZ#vgrp|2m4R)U6-*BkkBl z43r=eFY`9PnWB+XbVCoE1NLQKsbxKq7U2b4oI!cleDmz$ zE6R%Otdd7B=$e!J=pamMu5?Wq7T``6y&}vfKe!}d3J^Z-gQ?5~l?5Fdm^cLNI9V?( z*1nL|y&#ioe@%hxV4ihRjPK0tv*f%C2KjRbUb`t+lsUsJz;?|YU%;0!iKfPP<_!y; zv6+5{&h>H6>TOA*g9>#YCmQP}aReeHItP;x9>O z3ZT;5xucB%ec4(ZTyO)4YS^>4IS8H1&I^0x#udc0RU<3NyOigj>Ep8i8$oFPE>XSf zIO*NbCb`O8Erw|dzSH>~OVLYa6l_H_V{au2Cd7MGe;2eEjjV1aN9RPZddjzCW!1-LXb2KXZi%C6EH+wjG z&QGRY_D$+;Fjgjt(PlQ_df$jnyMEAcIW%XfJ3lbuCb=v8^(FK2XFTk&UPhC=LgXs) ziX6S(pSAkSPC<4pmtqS-;N;=L^Fx_OM*1@uLJ{h^;J4j0L7)WUuS4(Y<{bzGm1C=& zG@pw@PiyW^ywAhSINQlVgCTYKU_5bcM6>ysLAG9R3ZRc+&Dyq5s$Hd?CiN;|;6MSn z0U|**XMlDKjOds7!xfKY%;S!3EV70~8Z5^ppRT$R+*; 
zGML;gT2idEh}1eh#Jtkqo_)4RZtC7N@y>-sj}A`U3y#%=p?6T3#K%hc_g4C<$4=+V zUVZO%w|O11RH?ac0F4b8FzP-R1MmCBWv-+*bB5-!cp^a6_3y*hQflUV#EO5wl!A1s zMVnYDiBzGZj)tYHzXbBvSOX3rl}8`B9d71x7-)I;pQ7m8niIo03HWQyR_&9%)dm?E ztdN$)6w(t`_L!YC!y5y?J5^_#PpT#EjFo`whI#GcplwcGluooj_L~O{WHdFxGL@N=rkmxa0S)*L-sQok%{Ah*|J-y`FIqayme&($ivGXsfUpv)+(%afz8j! z!08;``p~;z9%QLnH$RBkuc7FIJQAYk#w{4*nM5OKsIh?ziPh zlbF$66>8*moO}Dcm$Ni9{e^haU->dRpFtoGgt%WI7|>t$F4oZA=XTUKfv@#E^iK^D zG|J@-hX9GJjY0U943TpF{G+a**VbZc$f497GF{NtXC_D^aV5bpvoAie#%A|lk_`=|;2Q==D_-0YCu zV6)%XI{`}(>=eXYTtEHWE$(DWT`C|F-(bfts9dKE=0c6L9tzpn9nkVdka4scQMxn> z5=cfK@xbOXX#oQ`yO(l|VtS?i$CYy@jo4N3yXpn4d)@>r*|9AKP2ep|W5EtbXRU>= ztH&3UC_2X*Odb8c)YG|D*c`$O{O4Y>c^$jP9Wyou;j=~ewVuF0z0Ik-gm*3M{8~+s*9pXJB9L;Y{&+j8s?AcHKy@Z+&z+w*+gk!M=~x$x`>wE6j|Zc!_R##A z-;avRTrv)(&7nRJrH}1>hhh~mgLaOd<7_ z31;zf?gX+tSlzUBkaw9V+h|SWr|r!ak6u}kEbw@q#P5RH-GnL67!gMnW`2}4QkK-# zdqs$~C46elll_dQtlR4%l{DVrcx!&)^lsq~2QiC$FQ+zL4mYRvp3eA_`k|{QQ)=b* z2Z^_kG5kGtGJR>C?5xh8XQS^s{WLPY3IF%70z1~Mu919i2HX^%F*XMf?~wj@Dp6e` zgLrT~+l2I4nQp&epDISBI?Oew3)Z>G@(B5PNhPev^?rKHxtg%dnWy-V;5=Ga#Hh&k zdd-}5mYl`HfRpMO@*wkP%@7IeM-IIxx{q>VY&Geni76OwHjbsRE-d@Gu$;oPKD_BM| zQP}w%WulqpHFgyrUgiH{lh>pccj#8B8U#F&pVT){{aU#kiNM4*Edk&lCF|?RfqA5M zQ6(oI5l4tF?iy|AV`e7*~%lf0XEPQ)*! z3m}7SUV#oaW~AHA`8+lK0p+B_3Tb(!=btpW4#+ zjX5}^^s^@+Crj7;m9}qsywTmj{6|p6s0J6OSR3MYdT4m*4!%fKYa9NNGo(%q!#)TW zMeb1uI)uJq5BNb;fa4D7IOaZ{ZaFeru*#N`%o#06=Cl{!7ZIu|m>aqG*r@lD)QE=v zCpW90Reba4emlkw9uMj{5_(ohH!fq%3vB(zn{TbEnXH5$5OB;hu1Q#Cqt2bdS)w$I+C%U@=3Y6OJzFS>y zcJk2QaG7C%x)uu`vz8^Y zInlWbfWplmH3OMG%7zQ*7}NI3Y(?)|LV^C)pYYMvZ7S~0gwfX6DuzyNIlV^m3~i$| zJwuq$%dz!9(8H(lyXv{q?=d&OByaff51lu9-}B?p8uNd-(biDH`=koAKDsQS2r`Z3 z2TtjJD45F}5RTz5gmT4d5w{2X1$(1gG_#^$WGJ3meAy@=>Lye@e%2OYz$03g`c2>1NzH&4jN$56{J!?jJREqVY>=p=y>wZsp47}@FE~vWIn6HZEw=wqU!RGOeIXC z#;w|Ie)9Bu^%)(QCh^u!%OS0$#MXd)MfFDcPw_ybtG}kJ(SY~xeeNSXwlGog{VSW= w`d9B`zrC7bpnKJTiTR2P8TtR)$NYAw_Qv~}e-4QM9(tuDrv|E$`4aMf0M43JzW@LL literal 0 HcmV?d00001 diff --git a/recipes/recipes_index.rst b/recipes/recipes_index.rst new file mode 100644 index 0000000000..02f3595b16 --- /dev/null +++ b/recipes/recipes_index.rst @@ -0,0 +1,20 @@ +PyTorch Recipes +--------------------------------------------- +Recipes are bite-sized bite-sized, actionable examples of how to use specific PyTorch features, different from our full-length tutorials. + +.. toctree:: + :hidden: + + /recipes/recipes/autograd_tutorial + /recipes/recipes/example_recipe + + +.. galleryitem:: /recipes/recipes/autograd_tutorial.py + :figure: /_static/img/autodiff.png + +.. galleryitem:: /recipes/recipes/example_recipe.py + :figure: /_static/img/autodiff.png + +.. raw:: html + +
diff --git a/recipes_source/README.txt b/recipes_source/README.txt
index 5d46a28aad..26ea202305 100644
--- a/recipes_source/README.txt
+++ b/recipes_source/README.txt
@@ -1,7 +1,5 @@
 Recipes
 ------------------
-
-1. recipes/* and recipes_main.rst
-	PyTorch Recipes
-	https://pytorch.org/tutorials/recipes/recipes_main.html
-
+1. recipes/* and recipes_index.rst
+	PyTorch Recipes
+	https://pytorch.org/tutorials/recipes/recipes_index.html
diff --git a/recipes_source/recipes/README.txt b/recipes_source/recipes/README.txt
index 8b34cae4f9..50b76bd23b 100644
--- a/recipes_source/recipes/README.txt
+++ b/recipes_source/recipes/README.txt
@@ -2,19 +2,10 @@ PyTorch Recipes
 ---------------------------------------------
 
 1. autograd_tutorial.py
-	Autograd: Automatic Differentiation
-	https://pytorch.org/tutorials/recipes/recipes/autograd_tutorial.html
-
-2. cifar10_tutorial.py
-	Training a Classifier
-	https://pytorch.org/tutorials/recipes/recipes/cifar10_tutorial.html
-
-3. neural_networks_tutorial.py
-	Neural Networks
-	https://pytorch.org/tutorials/recipes/recipes/neural_networks_tutorial.html#
-
-4. data_parallel_tutorial.py
-	Optional: Data Parallelism
-	https://pytorch.org/tutorials/recipes/recipes/data_parallel_tutorial.html
+	Autograd: Automatic Differentiation
+	https://pytorch.org/tutorials/recipes/recipes/autograd_tutorial.html
+2. example_recipe.py
+	Example Recipe
+	https://pytorch.org/tutorials/recipes/recipes/example_recipe.html
diff --git a/recipes_source/recipes/cifar10_tutorial.py b/recipes_source/recipes/cifar10_tutorial.py
deleted file mode 100644
index 730bf6ac98..0000000000
--- a/recipes_source/recipes/cifar10_tutorial.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Training a Classifier
-=====================
-
-This is it. You have seen how to define neural networks, compute loss and make
-updates to the weights of the network.
-
-Now you might be thinking,
-
-What about data?
-----------------
-
-Generally, when you have to deal with image, text, audio or video data,
-you can use standard python packages that load data into a numpy array.
-Then you can convert this array into a ``torch.*Tensor``.
-
--  For images, packages such as Pillow, OpenCV are useful
--  For audio, packages such as scipy and librosa
--  For text, either raw Python or Cython based loading, or NLTK and
-   SpaCy are useful
-
-Specifically for vision, we have created a package called
-``torchvision``, that has data loaders for common datasets such as
-Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz.,
-``torchvision.datasets`` and ``torch.utils.data.DataLoader``.
-
-This provides a huge convenience and avoids writing boilerplate code.
-
-For this tutorial, we will use the CIFAR10 dataset.
-It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,
-‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of
-size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
-
-.. figure:: /_static/img/cifar10.png
-   :alt: cifar10
-
-   cifar10
-
-
-Training an image classifier
-----------------------------
-
-We will do the following steps in order:
-
-1. Load and normalizing the CIFAR10 training and test datasets using
-   ``torchvision``
-2. Define a Convolutional Neural Network
-3. Define a loss function
-4. Train the network on the training data
-5. Test the network on the test data
-
-1. Loading and normalizing CIFAR10
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Using ``torchvision``, it’s extremely easy to load CIFAR10.
-""" -import torch -import torchvision -import torchvision.transforms as transforms - -######################################################################## -# The output of torchvision datasets are PILImage images of range [0, 1]. -# We transform them to Tensors of normalized range [-1, 1]. -# .. note:: -# If running on Windows and you get a BrokenPipeError, try setting -# the num_worker of torch.utils.data.DataLoader() to 0. - -transform = transforms.Compose( - [transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - -trainset = torchvision.datasets.CIFAR10(root='./data', train=True, - download=True, transform=transform) -trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, - shuffle=True, num_workers=2) - -testset = torchvision.datasets.CIFAR10(root='./data', train=False, - download=True, transform=transform) -testloader = torch.utils.data.DataLoader(testset, batch_size=4, - shuffle=False, num_workers=2) - -classes = ('plane', 'car', 'bird', 'cat', - 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') - -######################################################################## -# Let us show some of the training images, for fun. - -import matplotlib.pyplot as plt -import numpy as np - -# functions to show an image - - -def imshow(img): - img = img / 2 + 0.5 # unnormalize - npimg = img.numpy() - plt.imshow(np.transpose(npimg, (1, 2, 0))) - plt.show() - - -# get some random training images -dataiter = iter(trainloader) -images, labels = dataiter.next() - -# show images -imshow(torchvision.utils.make_grid(images)) -# print labels -print(' '.join('%5s' % classes[labels[j]] for j in range(4))) - - -######################################################################## -# 2. Define a Convolutional Neural Network -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -# Copy the neural network from the Neural Networks section before and modify it to -# take 3-channel images (instead of 1-channel images as it was defined). - -import torch.nn as nn -import torch.nn.functional as F - - -class Net(nn.Module): - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -net = Net() - -######################################################################## -# 3. Define a Loss function and optimizer -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -# Let's use a Classification Cross-Entropy loss and SGD with momentum. - -import torch.optim as optim - -criterion = nn.CrossEntropyLoss() -optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - -######################################################################## -# 4. Train the network -# ^^^^^^^^^^^^^^^^^^^^ -# -# This is when things start to get interesting. -# We simply have to loop over our data iterator, and feed the inputs to the -# network and optimize. 
- -for epoch in range(2): # loop over the dataset multiple times - - running_loss = 0.0 - for i, data in enumerate(trainloader, 0): - # get the inputs; data is a list of [inputs, labels] - inputs, labels = data - - # zero the parameter gradients - optimizer.zero_grad() - - # forward + backward + optimize - outputs = net(inputs) - loss = criterion(outputs, labels) - loss.backward() - optimizer.step() - - # print statistics - running_loss += loss.item() - if i % 2000 == 1999: # print every 2000 mini-batches - print('[%d, %5d] loss: %.3f' % - (epoch + 1, i + 1, running_loss / 2000)) - running_loss = 0.0 - -print('Finished Training') - -######################################################################## -# Let's quickly save our trained model: - -PATH = './cifar_net.pth' -torch.save(net.state_dict(), PATH) - -######################################################################## -# See `here `_ -# for more details on saving PyTorch models. -# -# 5. Test the network on the test data -# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -# -# We have trained the network for 2 passes over the training dataset. -# But we need to check if the network has learnt anything at all. -# -# We will check this by predicting the class label that the neural network -# outputs, and checking it against the ground-truth. If the prediction is -# correct, we add the sample to the list of correct predictions. -# -# Okay, first step. Let us display an image from the test set to get familiar. - -dataiter = iter(testloader) -images, labels = dataiter.next() - -# print images -imshow(torchvision.utils.make_grid(images)) -print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4))) - -######################################################################## -# Next, let's load back in our saved model (note: saving and re-loading the model -# wasn't necessary here, we only did it to illustrate how to do so): - -net = Net() -net.load_state_dict(torch.load(PATH)) - -######################################################################## -# Okay, now let us see what the neural network thinks these examples above are: - -outputs = net(images) - -######################################################################## -# The outputs are energies for the 10 classes. -# The higher the energy for a class, the more the network -# thinks that the image is of the particular class. -# So, let's get the index of the highest energy: -_, predicted = torch.max(outputs, 1) - -print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] - for j in range(4))) - -######################################################################## -# The results seem pretty good. -# -# Let us look at how the network performs on the whole dataset. - -correct = 0 -total = 0 -with torch.no_grad(): - for data in testloader: - images, labels = data - outputs = net(images) - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - -print('Accuracy of the network on the 10000 test images: %d %%' % ( - 100 * correct / total)) - -######################################################################## -# That looks way better than chance, which is 10% accuracy (randomly picking -# a class out of 10 classes). -# Seems like the network learnt something. -# -# Hmmm, what are the classes that performed well, and the classes that did -# not perform well: - -class_correct = list(0. for i in range(10)) -class_total = list(0. 
for i in range(10)) -with torch.no_grad(): - for data in testloader: - images, labels = data - outputs = net(images) - _, predicted = torch.max(outputs, 1) - c = (predicted == labels).squeeze() - for i in range(4): - label = labels[i] - class_correct[label] += c[i].item() - class_total[label] += 1 - - -for i in range(10): - print('Accuracy of %5s : %2d %%' % ( - classes[i], 100 * class_correct[i] / class_total[i])) - -######################################################################## -# Okay, so what next? -# -# How do we run these neural networks on the GPU? -# -# Training on GPU -# ---------------- -# Just like how you transfer a Tensor onto the GPU, you transfer the neural -# net onto the GPU. -# -# Let's first define our device as the first visible cuda device if we have -# CUDA available: - -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -# Assuming that we are on a CUDA machine, this should print a CUDA device: - -print(device) - -######################################################################## -# The rest of this section assumes that ``device`` is a CUDA device. -# -# Then these methods will recursively go over all modules and convert their -# parameters and buffers to CUDA tensors: -# -# .. code:: python -# -# net.to(device) -# -# -# Remember that you will have to send the inputs and targets at every step -# to the GPU too: -# -# .. code:: python -# -# inputs, labels = data[0].to(device), data[1].to(device) -# -# Why dont I notice MASSIVE speedup compared to CPU? Because your network -# is really small. -# -# **Exercise:** Try increasing the width of your network (argument 2 of -# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` – -# they need to be the same number), see what kind of speedup you get. -# -# **Goals achieved**: -# -# - Understanding PyTorch's Tensor library and neural networks at a high level. -# - Train a small neural network to classify images -# -# Training on multiple GPUs -# ------------------------- -# If you want to see even more MASSIVE speedup using all of your GPUs, -# please check out :doc:`data_parallel_tutorial`. -# -# Where do I go next? -# ------------------- -# -# - :doc:`Train neural nets to play video games ` -# - `Train a state-of-the-art ResNet network on imagenet`_ -# - `Train a face generator using Generative Adversarial Networks`_ -# - `Train a word-level language model using Recurrent LSTM networks`_ -# - `More examples`_ -# - `More tutorials`_ -# - `Discuss PyTorch on the Forums`_ -# - `Chat with other users on Slack`_ -# -# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet -# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan -# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model -# .. _More examples: https://github.com/pytorch/examples -# .. _More tutorials: https://github.com/pytorch/tutorials -# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/ -# .. 
_Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/
-
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
-del dataiter
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
diff --git a/recipes_source/recipes/custom_dataset.ipynb b/recipes_source/recipes/custom_dataset.ipynb
new file mode 100644
index 0000000000..c00d196228
--- /dev/null
+++ b/recipes_source/recipes/custom_dataset.ipynb
@@ -0,0 +1,383 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "name": "custom_dataset.ipynb",
+      "provenance": []
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    }
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "DCgx3NYWvMfb",
+        "colab_type": "text"
+      },
+      "source": [
+        "# Writing Custom Datasets"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ViYQRS93vH39",
+        "colab_type": "text"
+      },
+      "source": [
+        "A significant amount of the effort in developing machine learning algorithms goes into data preparation. PyTorch provides many tools to make data loading easy and, hopefully, to make your code more readable. In this recipe, you will learn how to create a custom dataset leveraging the PyTorch dataset APIs. \n",
+        "\n",
+        "Please note, to run this recipe, ensure the following packages are\n",
+        "installed:\n",
+        "- ``scikit-image``: For image io and transforms\n",
+        "- ``pandas``: For easier csv parsing\n",
+        "\n",
+        "As a point of attribution, this recipe is based on the original tutorial from `Sasank Chilamkurthy ` and was later edited by `Joe Spisak `."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "qOyML7wg1BMB",
+        "colab_type": "text"
+      },
+      "source": [
+        "### First let's import all of the needed libraries for this recipe"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "2Rc27Y3fugUl",
+        "colab_type": "code",
+        "colab": {}
+      },
+      "source": [
+        "from __future__ import print_function, division\n",
+        "import os\n",
+        "import torch\n",
+        "import pandas as pd\n",
+        "from skimage import io, transform\n",
+        "import numpy as np\n",
+        "import matplotlib.pyplot as plt\n",
+        "from torch.utils.data import Dataset, DataLoader\n",
+        "from torchvision import transforms, utils\n",
+        "\n",
+        "# Ignore warnings\n",
+        "import warnings\n",
+        "warnings.filterwarnings(\"ignore\")\n",
+        "\n",
+        "plt.ion()   # interactive mode"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "f2rHNlke17xe",
+        "colab_type": "text"
+      },
+      "source": [
+        "### The Dataset we'll use as a starting point"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "SyzczmWGz2tZ",
+        "colab_type": "text"
+      },
+      "source": [
+        "The dataset we are going to deal with is that of facial pose.\n",
+        "This means that a face is annotated like this:\n",
+        "\n",
+        "%%html\n",
+        "\n",
+        "\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "akRD-2aTKSXR",
+        "colab_type": "text"
+      },
+      "source": [
+        "Overall, 68 different landmark points are annotated for each face.\n",
+        "\n",
+        "As a next step, please download the dataset from `here ` so that the images are in a directory named 'data/faces/'.\n",
+        " \n",
+        " \n",
+        "**Note:** This dataset was actually generated by applying `dlib's pose estimation ` on images from the imagenet dataset containing the 'face' tag. 
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rMoZIwXS6pn2", + "colab_type": "text" + }, + "source": [ + "The dataset comes with a csv file with annotations which looks like this:\n", + "\n", + " image_name,part_0_x,part_0_y,part_1_x,part_1_y,part_2_x, ... ,part_67_x,part_67_y\n", + " 0805personali01.jpg,27,83,27,98, ... 84,134\n", + " 1084239450_e76e00b7e7.jpg,70,236,71,257, ... ,128,312\n", + "\n", + "Let's quickly read the CSV and get the annotations in an (N, 2) array where N is the number of landmarks." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ar1FD1QIz97S", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 443 + }, + "outputId": "7a937f63-3dbb-4454-baaa-c1151cb64709" + }, + "source": [ + "landmarks_frame = pd.read_csv('data/faces/face_landmarks.csv')\n", + "\n", + "n = 65\n", + "img_name = landmarks_frame.iloc[n, 0]\n", + "landmarks = landmarks_frame.iloc[n, 1:]\n", + "landmarks = np.asarray(landmarks)\n", + "landmarks = landmarks.astype('float').reshape(-1, 2)\n", + "\n", + "print('Image name: {}'.format(img_name))\n", + "print('Landmarks shape: {}'.format(landmarks.shape))\n", + "print('First 4 Landmarks: {}'.format(landmarks[:4]))\n", + "\n" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "error", + "ename": "FileNotFoundError", + "evalue": "ignored", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mlandmarks_frame\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'data/faces/face_landmarks.csv'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m65\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mimg_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlandmarks_frame\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mlandmarks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlandmarks_frame\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mparser_f\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)\u001b[0m\n\u001b[1;32m 
683\u001b[0m )\n\u001b[1;32m 684\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 685\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 686\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 687\u001b[0m \u001b[0mparser_f\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 455\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 456\u001b[0m \u001b[0;31m# Create the parser.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 457\u001b[0;31m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTextFileReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfp_or_buf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 458\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 459\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mchunksize\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0miterator\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 893\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"has_index_names\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"has_index_names\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 894\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 895\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 896\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 897\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_make_engine\u001b[0;34m(self, engine)\u001b[0m\n\u001b[1;32m 1133\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mengine\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"c\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1134\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"c\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1135\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mCParserWrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1136\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1137\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mengine\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"python\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, src, **kwds)\u001b[0m\n\u001b[1;32m 1915\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"usecols\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0musecols\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1916\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1917\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mparsers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTextReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msrc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1918\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munnamed_cols\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munnamed_cols\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1919\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader.__cinit__\u001b[0;34m()\u001b[0m\n", + "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._setup_parser_source\u001b[0;34m()\u001b[0m\n", + "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] File b'data/faces/face_landmarks.csv' does not exist: b'data/faces/face_landmarks.csv'" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "v9GLPpde0BhV", + "colab_type": "text" + }, + "source": [ + "### Next let's write a simple helper function to show an image, its landmarks and use it to show a sample." 
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "5hpERgmA0Egv",
+        "colab_type": "code",
+        "colab": {
+          "base_uri": "https://localhost:8080/",
+          "height": 249
+        },
+        "outputId": "811becd5-4856-436a-839c-80f9eede2ff9"
+      },
+      "source": [
+        "def show_landmarks(image, landmarks):\n",
+        "    \"\"\"Show image with landmarks\"\"\"\n",
+        "    plt.imshow(image)\n",
+        "    plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n",
+        "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+        "\n",
+        "plt.figure()\n",
+        "show_landmarks(io.imread(os.path.join('data/faces/', img_name)),\n",
+        "               landmarks)\n",
+        "plt.show()"
+      ],
+      "execution_count": 3,
+      "outputs": [
+        {
+          "output_type": "error",
+          "ename": "NameError",
+          "evalue": "ignored",
+          "traceback": [
+            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+            "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      7\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m show_landmarks(io.imread(os.path.join('data/faces/', img_name)),\n\u001b[0m\u001b[1;32m      9\u001b[0m                landmarks)\n\u001b[1;32m     10\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+            "\u001b[0;31mNameError\u001b[0m: name 'img_name' is not defined"
+          ]
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              ""
+            ]
+          },
+          "metadata": {
+            "tags": []
+          }
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ALvS7mzE7WPw",
+        "colab_type": "text"
+      },
+      "source": [
+        "### Now let's talk about the PyTorch dataset class"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "pISn2IKj0G6y",
+        "colab_type": "text"
+      },
+      "source": [
+        " ``torch.utils.data.Dataset`` is an abstract class representing a\n",
+        " dataset.\n",
+        " Your custom dataset should inherit ``Dataset`` and override the following\n",
+        " methods:\n",
+        "\n",
+        " -  ``__len__`` so that ``len(dataset)`` returns the size of the dataset.\n",
+        " -  ``__getitem__`` to support indexing such that ``dataset[i]`` can\n",
+        "    be used to get the :math:`i`\\ th sample\n",
+        "\n",
+        "Let's create a dataset class for our face landmarks dataset. We will read the csv in ``__init__`` but leave the reading of images to ``__getitem__``. This is memory efficient because not all the images are stored in memory at once; they are read as required.\n",
+        "\n",
+        "Here we show a sample of our dataset in the form of a dict ``{'image': image, 'landmarks': landmarks}``. Our dataset will take an optional argument ``transform`` so that any required processing can be applied on the sample. We will see the usefulness of ``transform`` in another recipe.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "sJ76LdsU0K_c",
+        "colab_type": "code",
+        "colab": {}
+      },
+      "source": [
+        "class FaceLandmarksDataset(Dataset):\n",
+        "    \"\"\"Face Landmarks dataset.\"\"\"\n",
+        "\n",
+        "    def __init__(self, csv_file, root_dir, transform=None):\n",
+        "        \"\"\"\n",
+        "        Args:\n",
+        "            csv_file (string): Path to the csv file with annotations.\n",
+        "            root_dir (string): Directory with all the images.\n",
+        "            transform (callable, optional): Optional transform to be applied\n",
+        "                on a sample.\n",
+        "        \"\"\"\n",
+        "        self.landmarks_frame = pd.read_csv(csv_file)\n",
+        "        self.root_dir = root_dir\n",
+        "        self.transform = transform\n",
+        "\n",
+        "    def __len__(self):\n",
+        "        return len(self.landmarks_frame)\n",
+        "\n",
+        "    def __getitem__(self, idx):\n",
+        "        if torch.is_tensor(idx):\n",
+        "            idx = idx.tolist()\n",
+        "\n",
+        "        img_name = os.path.join(self.root_dir,\n",
+        "                                self.landmarks_frame.iloc[idx, 0])\n",
+        "        image = io.imread(img_name)\n",
+        "        landmarks = self.landmarks_frame.iloc[idx, 1:]\n",
+        "        landmarks = np.array([landmarks])\n",
+        "        landmarks = landmarks.astype('float').reshape(-1, 2)\n",
+        "        sample = {'image': image, 'landmarks': landmarks}\n",
+        "\n",
+        "        if self.transform:\n",
+        "            sample = self.transform(sample)\n",
+        "\n",
+        "        return sample"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "MtKUWMoG7_VT",
+        "colab_type": "text"
+      },
+      "source": [
+        "### Iterating through data samples"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "53V7WwaW0OYV",
+        "colab_type": "text"
+      },
+      "source": [
+        "Next let's instantiate this class and iterate through the data samples. We will print the sizes of the first 4 samples and show their landmarks."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "0Sh9OETx0R8N",
+        "colab_type": "code",
+        "colab": {}
+      },
+      "source": [
+        "face_dataset = FaceLandmarksDataset(csv_file='data/faces/face_landmarks.csv',\n",
+        "                                    root_dir='data/faces/')\n",
+        "\n",
+        "fig = plt.figure()\n",
+        "\n",
+        "for i in range(len(face_dataset)):\n",
+        "    sample = face_dataset[i]\n",
+        "\n",
+        "    print(i, sample['image'].shape, sample['landmarks'].shape)\n",
+        "\n",
+        "    ax = plt.subplot(1, 4, i + 1)\n",
+        "    plt.tight_layout()\n",
+        "    ax.set_title('Sample #{}'.format(i))\n",
+        "    ax.axis('off')\n",
+        "    show_landmarks(**sample)\n",
+        "\n",
+        "    if i == 3:\n",
+        "        plt.show()\n",
+        "        break"
+      ],
+      "execution_count": 0,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "mcgv13T_0VDz",
+        "colab_type": "text"
+      },
+      "source": [
+        "Now that you've learned how to create a custom dataset with PyTorch, we recommend reading recipes on writing custom dataloaders and data transforms. See here:\n",
+        "\n",
+        "\n"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/recipes_source/recipes/data_parallel_tutorial.py b/recipes_source/recipes/data_parallel_tutorial.py
deleted file mode 100644
index eebca8ea52..0000000000
--- a/recipes_source/recipes/data_parallel_tutorial.py
+++ /dev/null
@@ -1,255 +0,0 @@
-"""
-Optional: Data Parallelism
-==========================
-**Authors**: `Sung Kim `_ and `Jenny Kang `_
-
-In this tutorial, we will learn how to use multiple GPUs using ``DataParallel``.
-
-It's very easy to use GPUs with PyTorch. You can put the model on a GPU:
-
-.. code:: python
-
-    device = torch.device("cuda:0")
-    model.to(device)
-
-Then, you can copy all your tensors to the GPU:
-
-.. code:: python
-
-    mytensor = my_tensor.to(device)
-
-Please note that just calling ``my_tensor.to(device)`` returns a new copy of
-``my_tensor`` on GPU instead of rewriting ``my_tensor``. You need to assign it to
-a new tensor and use that tensor on the GPU.
-
-It's natural to execute your forward, backward propagations on multiple GPUs.
-However, Pytorch will only use one GPU by default. You can easily run your
-operations on multiple GPUs by making your model run parallelly using
-``DataParallel``:
-
-.. code:: python
-
-    model = nn.DataParallel(model)
-
-That's the core behind this tutorial. We will explore it in more detail below.
-"""
-
-
-######################################################################
-# Imports and parameters
-# ----------------------
-#
-# Import PyTorch modules and define parameters.
-#
-
-import torch
-import torch.nn as nn
-from torch.utils.data import Dataset, DataLoader
-
-# Parameters and DataLoaders
-input_size = 5
-output_size = 2
-
-batch_size = 30
-data_size = 100
-
-
-######################################################################
-# Device
-#
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-######################################################################
-# Dummy DataSet
-# -------------
-#
-# Make a dummy (random) dataset. 
You just need to implement the -# getitem -# - -class RandomDataset(Dataset): - - def __init__(self, size, length): - self.len = length - self.data = torch.randn(length, size) - - def __getitem__(self, index): - return self.data[index] - - def __len__(self): - return self.len - -rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size), - batch_size=batch_size, shuffle=True) - - -###################################################################### -# Simple Model -# ------------ -# -# For the demo, our model just gets an input, performs a linear operation, and -# gives an output. However, you can use ``DataParallel`` on any model (CNN, RNN, -# Capsule Net etc.) -# -# We've placed a print statement inside the model to monitor the size of input -# and output tensors. -# Please pay attention to what is printed at batch rank 0. -# - -class Model(nn.Module): - # Our model - - def __init__(self, input_size, output_size): - super(Model, self).__init__() - self.fc = nn.Linear(input_size, output_size) - - def forward(self, input): - output = self.fc(input) - print("\tIn Model: input size", input.size(), - "output size", output.size()) - - return output - - -###################################################################### -# Create Model and DataParallel -# ----------------------------- -# -# This is the core part of the tutorial. First, we need to make a model instance -# and check if we have multiple GPUs. If we have multiple GPUs, we can wrap -# our model using ``nn.DataParallel``. Then we can put our model on GPUs by -# ``model.to(device)`` -# - -model = Model(input_size, output_size) -if torch.cuda.device_count() > 1: - print("Let's use", torch.cuda.device_count(), "GPUs!") - # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs - model = nn.DataParallel(model) - -model.to(device) - - -###################################################################### -# Run the Model -# ------------- -# -# Now we can see the sizes of input and output tensors. -# - -for data in rand_loader: - input = data.to(device) - output = model(input) - print("Outside: input size", input.size(), - "output_size", output.size()) - - -###################################################################### -# Results -# ------- -# -# If you have no GPU or one GPU, when we batch 30 inputs and 30 outputs, the model gets 30 and outputs 30 as -# expected. But if you have multiple GPUs, then you can get results like this. -# -# 2 GPUs -# ~~~~~~ -# -# If you have 2, you will see: -# -# .. code:: bash -# -# # on 2 GPUs -# Let's use 2 GPUs! -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# In Model: input size torch.Size([15, 5]) output size torch.Size([15, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([5, 5]) output size torch.Size([5, 2]) -# In Model: input size torch.Size([5, 5]) output size torch.Size([5, 2]) -# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2]) -# -# 3 GPUs -# ~~~~~~ -# -# If you have 3 GPUs, you will see: -# -# .. 
code:: bash -# -# Let's use 3 GPUs! -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# In Model: input size torch.Size([10, 5]) output size torch.Size([10, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2]) -# -# 8 GPUs -# ~~~~~~~~~~~~~~ -# -# If you have 8, you will see: -# -# .. code:: bash -# -# Let's use 8 GPUs! -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([4, 5]) output size torch.Size([4, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# Outside: input size torch.Size([30, 5]) output_size torch.Size([30, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2]) -# In Model: input size torch.Size([2, 5]) output 
size torch.Size([2, 2])
-# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2])
-# In Model: input size torch.Size([2, 5]) output size torch.Size([2, 2])
-# Outside: input size torch.Size([10, 5]) output_size torch.Size([10, 2])
-#
-
-
-######################################################################
-# Summary
-# -------
-#
-# DataParallel splits your data automatically and sends job orders to multiple
-# models on several GPUs. After each model finishes their job, DataParallel
-# collects and merges the results before returning it to you.
-#
-# For more information, please check out
-# https://pytorch.org/tutorials/beginner/former\_torchies/parallelism\_tutorial.html.
-#
diff --git a/recipes_source/recipes/example_recipe.py b/recipes_source/recipes/example_recipe.py
new file mode 100644
index 0000000000..9dd3bb199b
--- /dev/null
+++ b/recipes_source/recipes/example_recipe.py
@@ -0,0 +1,65 @@
+"""
+TODO: Add Recipe Title
+=======================
+
+TODO:
+   * Include 1-2 sentences summing up what the user can expect from the recipe.
+   * For example - “This sample demonstrates how to...”
+
+Introduction
+--------------
+TODO:
+   * Add why this topic is important.
+   * Ex: Provide a summary of how Integrated Gradients works and how you will teach users to implement it using Captum in this tutorial
+
+Setup
+----------------------
+TODO:
+   * Call out any required setup or data downloads
+
+
+TODO: List Steps
+-----------------
+TODO:
+   * Use the steps you introduced in the Learning Objectives
+   * Break down the steps as well as add prose for context
+   * Add comments in the code to help clarify for readers what each section is doing
+   * Link back to relevant pytorch documentation
+   * Think of it akin to creating a really practical Medium post
+
+TIPS:
+   * To denote a word or phrase as code, enclose it in double backticks (``). ``torch.Tensor``
+   * You can **bold** or *italicize* text for emphasis.
+   * Add python code directly in the file. The output will render and build on the site in a separate code block.
+     Below is an example of python code with comments.
+     You can build this python file to see the resulting html by following the README.md at github.com/pytorch/tutorials
+"""

+import torch
+
+###############################################################
+# Because of the line of pound sign delimiters above, this comment will show up as plain text between the code.
+x = torch.ones(2, 2, requires_grad=True)
+# Since this is a single line comment, it will show up as a comment in the code block
+print(x)
+
+
+
+###############################################################
+# .. Note::
+#
+#       You can add Notes using this syntax
+
+
+
+
+########################################################################
+# Learn More
+# ----------------------------
+# TODO:
+#    * Link to any additional resources (e.g. Docs, other Tutorials, external resources) if readers want to learn more
+#    * There are different ways to add hyperlinks -
+#       * For example, pasting the url works: Read more about the ``autograd.Function`` at https://pytorch.org/docs/stable/autograd.html#function.
+#       * or link to other files in this repository by their titles such as :doc:`data_parallel_tutorial`.
+#    * There are also ways to add internal and external links. 
diff --git a/recipes_source/recipes/example_recipe.py b/recipes_source/recipes/example_recipe.py
new file mode 100644
index 0000000000..9dd3bb199b
--- /dev/null
+++ b/recipes_source/recipes/example_recipe.py
@@ -0,0 +1,65 @@
+"""
+TODO: Add Recipe Title
+=======================
+
+TODO:
+  * Include 1-2 sentences summing up what the user can expect from the recipe.
+  * For example - “This sample demonstrates how to...”
+
+Introduction
+--------------
+TODO:
+  * Explain why this topic is important.
+  * Ex: Provide a summary of how Integrated Gradients works and how you will teach users to implement it using Captum in this tutorial.
+
+Setup
+----------------------
+TODO:
+  * Call out any required setup or data downloads.
+
+
+TODO: List Steps
+-----------------
+TODO:
+  * Use the steps you introduced in the Learning Objectives.
+  * Break down the steps as well as add prose for context.
+  * Add comments in the code to help clarify for readers what each section is doing.
+  * Link back to the relevant PyTorch documentation.
+  * Think of it akin to creating a really practical Medium post.
+
+TIPS:
+  * To denote a word or phrase as code, enclose it in double backticks: ``torch.Tensor``.
+  * You can **bold** or *italicize* text for emphasis.
+  * Add Python code directly in the file. The output will render and build on the site in a separate code block.
+    Below is an example of Python code with comments.
+    You can build this Python file to see the resulting HTML by following the README.md at github.com/pytorch/tutorials
+"""
+
+import torch
+
+###############################################################
+# Because of the line of pound-sign delimiters above, this comment will show up as plain text between the code blocks.
+x = torch.ones(2, 2, requires_grad=True)
+# Since this is a single-line comment, it will show up as a comment in the code block
+print(x)
+
+
+
+###############################################################
+# .. Note::
+#
+#    You can add Notes using this syntax
+
+
+
+
+########################################################################
+# Learn More
+# ----------------------------
+# TODO:
+#   * Link to any additional resources (e.g. docs, other tutorials, external resources) if readers want to learn more.
+#   * There are different ways to add hyperlinks:
+#
+#     * For example, pasting the URL works: read more about ``autograd.Function`` at https://pytorch.org/docs/stable/autograd.html#function.
+#     * Or link to other files in this repository by their titles, such as :doc:`data_parallel_tutorial`.
+#     * There are also ways to add internal and external links. Check out this resource for more tips: https://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html#id4
+#
diff --git a/recipes_source/recipes/neural_networks_tutorial.py b/recipes_source/recipes/neural_networks_tutorial.py
deleted file mode 100644
index 144dd3d144..0000000000
--- a/recipes_source/recipes/neural_networks_tutorial.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Neural Networks
-===============
-
-Neural networks can be constructed using the ``torch.nn`` package.
-
-Now that you have had a glimpse of ``autograd``, note that ``nn`` depends on
-``autograd`` to define models and differentiate them.
-An ``nn.Module`` contains layers, and a method ``forward(input)`` that
-returns the ``output``.
-
-For example, look at this network that classifies digit images:
-
-.. figure:: /_static/img/mnist.png
-   :alt: convnet
-
-   convnet
-
-It is a simple feed-forward network. It takes the input, feeds it
-through several layers one after the other, and then finally gives the
-output.
-
-A typical training procedure for a neural network is as follows:
-
-- Define the neural network with some learnable parameters (or weights)
-- Iterate over a dataset of inputs
-- Process the input through the network
-- Compute the loss (how far the output is from being correct)
-- Propagate gradients back into the network’s parameters
-- Update the weights of the network, typically using a simple update rule:
-  ``weight = weight - learning_rate * gradient``
-
-Define the network
-------------------
-
-Let’s define this network:
-"""
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class Net(nn.Module):
-
-    def __init__(self):
-        super(Net, self).__init__()
-        # 1 input image channel, 6 output channels, 3x3 square convolution
-        # kernel
-        self.conv1 = nn.Conv2d(1, 6, 3)
-        self.conv2 = nn.Conv2d(6, 16, 3)
-        # an affine operation: y = Wx + b
-        self.fc1 = nn.Linear(16 * 6 * 6, 120)  # 6*6 from image dimension
-        self.fc2 = nn.Linear(120, 84)
-        self.fc3 = nn.Linear(84, 10)
-
-    def forward(self, x):
-        # Max pooling over a (2, 2) window
-        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
-        # If the size is a square, you can specify only a single number
-        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
-        x = x.view(-1, self.num_flat_features(x))
-        x = F.relu(self.fc1(x))
-        x = F.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
-
-    def num_flat_features(self, x):
-        size = x.size()[1:]  # all dimensions except the batch dimension
-        num_features = 1
-        for s in size:
-            num_features *= s
-        return num_features
-
-
-net = Net()
-print(net)
-
-########################################################################
-# You just have to define the ``forward`` function; the ``backward``
-# function (where gradients are computed) is automatically defined for you
-# by ``autograd``.
-# You can use any of the Tensor operations in the ``forward`` function.
-#
-# The learnable parameters of a model are returned by ``net.parameters()``:

-params = list(net.parameters())
-print(len(params))
-print(params[0].size())  # conv1's .weight

-########################################################################
-# Let's try a random 32x32 input.
-# Note: the expected input size of this net (LeNet) is 32x32. To use this
-# net on the MNIST dataset, please resize the images from the dataset to
-# 32x32; a short resizing sketch follows.
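-
-########################################################################
-# As a hedged, editor-added aside: one way to get 32x32 inputs from MNIST's
-# 28x28 images is a ``torchvision`` transform pipeline. This assumes
-# ``torchvision`` is installed; the ``root`` path is illustrative, and the
-# dataset line is left commented out to avoid a download during the build.

-import torchvision.transforms as transforms
-
-resize_transform = transforms.Compose([
-    transforms.Resize((32, 32)),  # upscale 28x28 -> 32x32
-    transforms.ToTensor(),        # PIL image -> FloatTensor in [0, 1]
-])
-# mnist = torchvision.datasets.MNIST(root="./data", download=True,
-#                                    transform=resize_transform)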
-
-input = torch.randn(1, 1, 32, 32)
-out = net(input)
-print(out)

-########################################################################
-# Zero the gradient buffers of all parameters and backpropagate with random
-# gradients:
-net.zero_grad()
-out.backward(torch.randn(1, 10))

-########################################################################
-# .. note::
-#
-#     ``torch.nn`` only supports mini-batches. The entire ``torch.nn``
-#     package only supports inputs that are a mini-batch of samples, and not
-#     a single sample.
-#
-#     For example, ``nn.Conv2d`` will take in a 4D Tensor of
-#     ``nSamples x nChannels x Height x Width``.
-#
-#     If you have a single sample, just use ``input.unsqueeze(0)`` to add
-#     a fake batch dimension.
-#
-# Before proceeding further, let's recap all the classes you’ve seen so far.
-#
-# **Recap:**
-#   -  ``torch.Tensor`` - A *multi-dimensional array* with support for autograd
-#      operations like ``backward()``. Also *holds the gradient* w.r.t. the
-#      tensor.
-#   -  ``nn.Module`` - Neural network module. *Convenient way of
-#      encapsulating parameters*, with helpers for moving them to GPU,
-#      exporting, loading, etc.
-#   -  ``nn.Parameter`` - A kind of Tensor that is *automatically
-#      registered as a parameter when assigned as an attribute to a*
-#      ``Module``.
-#   -  ``autograd.Function`` - Implements *forward and backward definitions
-#      of an autograd operation*. Every ``Tensor`` operation creates at
-#      least a single ``Function`` node that connects to the functions that
-#      created a ``Tensor`` and *encodes its history*.
-#
-# **At this point, we covered:**
-#   -  Defining a neural network
-#   -  Processing inputs and calling backward
-#
-# **Still left:**
-#   -  Computing the loss
-#   -  Updating the weights of the network
-#
-# Loss Function
-# -------------
-# A loss function takes the (output, target) pair of inputs and computes a
-# value that estimates how far the output is from the target.
-#
-# There are several different
-# `loss functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_
-# under the ``nn`` package.
-# A simple loss is ``nn.MSELoss``, which computes the mean-squared error
-# between the output and the target.
-#
-# For example:

-output = net(input)
-target = torch.randn(10)  # a dummy target, for example
-target = target.view(1, -1)  # make it the same shape as the output
-criterion = nn.MSELoss()
-
-loss = criterion(output, target)
-print(loss)

-########################################################################
-# Now, if you follow ``loss`` in the backward direction, using its
-# ``.grad_fn`` attribute, you will see a graph of computations that looks
-# like this:
-#
-# ::
-#
-#     input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
-#           -> view -> linear -> relu -> linear -> relu -> linear
-#           -> MSELoss
-#           -> loss
-#
-# So, when we call ``loss.backward()``, the whole graph is differentiated
-# w.r.t. the loss, and all Tensors in the graph that have ``requires_grad=True``
-# will have their ``.grad`` Tensor accumulated with the gradient.
-#
-# For illustration, let us follow a few steps backward:

-print(loss.grad_fn)  # MSELoss
-print(loss.grad_fn.next_functions[0][0])  # Linear
-print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU

-########################################################################
-# Backprop
-# --------
-# To backpropagate the error, all we have to do is call ``loss.backward()``.
-# You need to clear the existing gradients first, though; otherwise new
-# gradients will be accumulated into the existing ones, as the short sketch
-# below illustrates.
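-
-########################################################################
-# A minimal, editor-added illustration of that accumulation (``w`` is a
-# throwaway tensor, unrelated to ``net``): each ``backward()`` call adds
-# into ``.grad`` rather than overwriting it.

-w = torch.ones(1, requires_grad=True)
-(w * 2).sum().backward()
-print(w.grad)  # tensor([2.])
-(w * 2).sum().backward()
-print(w.grad)  # tensor([4.]) -- accumulated, not replaced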
-########################################################################
-# Now we shall call ``loss.backward()``, and have a look at conv1's bias
-# gradients before and after the backward pass.


-net.zero_grad()  # zeroes the gradient buffers of all parameters

-print('conv1.bias.grad before backward')
-print(net.conv1.bias.grad)

-loss.backward()

-print('conv1.bias.grad after backward')
-print(net.conv1.bias.grad)

-########################################################################
-# Now, we have seen how to use loss functions.
-#
-# **Read Later:**
-#
-#   The neural network package contains various modules and loss functions
-#   that form the building blocks of deep neural networks. A full list with
-#   documentation is `here <https://pytorch.org/docs/stable/nn.html>`_.
-#
-# **The only thing left to learn is:**
-#
-#   - Updating the weights of the network
-#
-# Update the weights
-# ------------------
-# The simplest update rule used in practice is Stochastic Gradient
-# Descent (SGD):
-#
-#     ``weight = weight - learning_rate * gradient``
-#
-# We can implement this using simple Python code:
-#
-# .. code:: python
-#
-#     learning_rate = 0.01
-#     for f in net.parameters():
-#         f.data.sub_(f.grad.data * learning_rate)
-#
-# However, as you use neural networks, you will want to use various update
-# rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
-# To enable this, we built a small package, ``torch.optim``, that
-# implements all these methods. Using it is very simple:

-import torch.optim as optim

-# create your optimizer
-optimizer = optim.SGD(net.parameters(), lr=0.01)

-# in your training loop:
-optimizer.zero_grad()  # zero the gradient buffers
-output = net(input)
-loss = criterion(output, target)
-loss.backward()
-optimizer.step()  # does the update


-###############################################################
-# .. Note::
-#
-#       Observe how the gradient buffers had to be manually set to zero using
-#       ``optimizer.zero_grad()``. This is because gradients are accumulated
-#       as explained in the `Backprop`_ section.
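-
-###############################################################
-# An editor-added footnote: swapping in a different update rule is a
-# one-line change. The hyperparameters below are illustrative, not tuned.

-optimizer = optim.Adam(net.parameters(), lr=0.001)     # Adam instead of SGD
-# optimizer = optim.RMSprop(net.parameters(), lr=0.01) # or RMSProp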
diff --git a/recipes_source/recipes/tensor_tutorial.py b/recipes_source/recipes/tensor_tutorial.py
deleted file mode 100644
index 7b339ee225..0000000000
--- a/recipes_source/recipes/tensor_tutorial.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-What is PyTorch?
-================
-
-It’s a Python-based scientific computing package targeted at two sets of
-audiences:
-
--  A replacement for NumPy that uses the power of GPUs
--  A deep learning research platform that provides maximum flexibility
-   and speed
-
-Getting Started
----------------
-
-Tensors
-^^^^^^^
-
-Tensors are similar to NumPy’s ndarrays, with the addition being that
-Tensors can also be used on a GPU to accelerate computing.
-"""

-from __future__ import print_function
-import torch

-###############################################################
-# .. note::
-#     An uninitialized matrix is declared, but does not contain definite
-#     known values before it is used. When an uninitialized matrix is
-#     created, whatever values were in the allocated memory at the time
-#     will appear as the initial values.

-###############################################################
-# Construct a 5x3 matrix, uninitialized:

-x = torch.empty(5, 3)
-print(x)

-###############################################################
-# Construct a randomly initialized matrix:

-x = torch.rand(5, 3)
-print(x)

-###############################################################
-# Construct a matrix filled with zeros and of dtype ``long``:

-x = torch.zeros(5, 3, dtype=torch.long)
-print(x)

-###############################################################
-# Construct a tensor directly from data:

-x = torch.tensor([5.5, 3])
-print(x)

-###############################################################
-# Or create a tensor based on an existing tensor. These methods
-# will reuse properties of the input tensor, e.g. ``dtype``, unless
-# new values are provided by the user:

-x = x.new_ones(5, 3, dtype=torch.double)  # new_* methods take in sizes
-print(x)

-x = torch.randn_like(x, dtype=torch.float)  # override dtype!
-print(x)  # result has the same size

-###############################################################
-# Get its size:

-print(x.size())

-###############################################################
-# .. note::
-#     ``torch.Size`` is in fact a tuple, so it supports all tuple operations.
-#
-# Operations
-# ^^^^^^^^^^
-# There are multiple syntaxes for operations. In the following
-# example, we will take a look at the addition operation.
-#
-# Addition: syntax 1
-y = torch.rand(5, 3)
-print(x + y)

-###############################################################
-# Addition: syntax 2

-print(torch.add(x, y))

-###############################################################
-# Addition: providing an output tensor as an argument
-result = torch.empty(5, 3)
-torch.add(x, y, out=result)
-print(result)

-###############################################################
-# Addition: in-place

-# adds x to y
-y.add_(x)
-print(y)

-###############################################################
-# .. note::
-#     Any operation that mutates a tensor in-place is post-fixed with an ``_``.
-#     For example, ``x.copy_(y)`` and ``x.t_()`` will change ``x``.
-#
-# You can use standard NumPy-like indexing with all the bells and whistles!

-print(x[:, 1])

-###############################################################
-# Resizing: if you want to resize/reshape a tensor, you can use ``torch.view``
-# (a short sketch of ``view``'s memory sharing follows below):
-x = torch.randn(4, 4)
-y = x.view(16)
-z = x.view(-1, 8)  # the size -1 is inferred from the other dimensions
-print(x.size(), y.size(), z.size())
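-
-###############################################################
-# An editor-added aside: ``view`` does not copy data. The sketch below,
-# using throwaway names ``base`` and ``flat``, checks that a viewed tensor
-# shares storage with its source, so in-place edits are visible in both.
-base = torch.zeros(2, 2)
-flat = base.view(4)
-flat[0] = 7.0
-print(base)  # the 7.0 written through ``flat`` shows up in ``base``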
-
-###############################################################
-# If you have a one-element tensor, use ``.item()`` to get the value as a
-# Python number:
-x = torch.randn(1)
-print(x)
-print(x.item())

-###############################################################
-# **Read later:**
-#
-#   100+ Tensor operations, including transposing, indexing, slicing,
-#   mathematical operations, linear algebra, random numbers, etc.,
-#   are described
-#   `here <https://pytorch.org/docs/stable/torch.html>`_.
-#
-# NumPy Bridge
-# ------------
-#
-# Converting a Torch Tensor to a NumPy array and vice versa is a breeze.
-#
-# The Torch Tensor and NumPy array will share their underlying memory
-# locations (if the Torch Tensor is on CPU), and changing one will change
-# the other.
-#
-# Converting a Torch Tensor to a NumPy Array
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-a = torch.ones(5)
-print(a)

-###############################################################
-#

-b = a.numpy()
-print(b)

-###############################################################
-# See how the numpy array changed in value:

-a.add_(1)
-print(a)
-print(b)

-###############################################################
-# Converting a NumPy Array to a Torch Tensor
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-# See how changing the NumPy array changed the Torch Tensor automatically:

-import numpy as np
-a = np.ones(5)
-b = torch.from_numpy(a)
-np.add(a, 1, out=a)
-print(a)
-print(b)

-###############################################################
-# All the Tensors on the CPU except a CharTensor support converting to
-# NumPy and back.
-#
-# CUDA Tensors
-# ------------
-#
-# Tensors can be moved onto any device using the ``.to`` method.

-# let us run this cell only if CUDA is available
-# We will use ``torch.device`` objects to move tensors in and out of the GPU
-if torch.cuda.is_available():
-    device = torch.device("cuda")           # a CUDA device object
-    y = torch.ones_like(x, device=device)   # directly create a tensor on the GPU
-    x = x.to(device)                        # or just use strings: ``.to("cuda")``
-    z = x + y
-    print(z)
-    print(z.to("cpu", torch.double))        # ``.to`` can also change the dtype at the same time!
diff --git a/recipes_source/recipes_index.rst b/recipes_source/recipes_index.rst
index 48a303cb6e..02f3595b16 100644
--- a/recipes_source/recipes_index.rst
+++ b/recipes_source/recipes_index.rst
@@ -6,24 +6,14 @@ Recipes are bite-sized, actionable examples of how to use specific Py
    :hidden:
 
    /recipes/recipes/autograd_tutorial
-   /recipes/recipes/cifar10_tutorial
-   /recipes/recipes/data_parallel_tutorial
-   /recipes/recipes/neural_networks_tutorial
+   /recipes/recipes/example_recipe
 
 
 .. galleryitem:: /recipes/recipes/autograd_tutorial.py
     :figure: /_static/img/autodiff.png
 
-.. galleryitem:: /recipes/recipes/cifar10_tutorial.py
-    :figure: /_static/img/cifar10.png
-
-.. galleryitem:: /recipes/recipes/data_parallel_tutorial.py
-    :figure: /_static/img/data_parallel.png
-
-.. galleryitem:: /recipes/recipes/neural_networks_tutorial.py
-    :figure: /_static/img/mnist.png
-
-
+.. galleryitem:: /recipes/recipes/example_recipe.py
+    :figure: /_static/img/autodiff.png
 
 .. raw:: html
 
     <div style='clear:both'></div>