From 408e9b253a7a387ab3ec461dacc9ab07977e16bf Mon Sep 17 00:00:00 2001
From: chenbohua3
Date: Sun, 18 Jul 2021 15:37:25 +0800
Subject: [PATCH] update docs

---
 nni/algorithms/compression/pytorch/quantization/quantizers.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nni/algorithms/compression/pytorch/quantization/quantizers.py b/nni/algorithms/compression/pytorch/quantization/quantizers.py
index 3b5c0ac752..0e7ff11e96 100644
--- a/nni/algorithms/compression/pytorch/quantization/quantizers.py
+++ b/nni/algorithms/compression/pytorch/quantization/quantizers.py
@@ -146,7 +146,9 @@ def __init__(self, model, config_list, optimizer=None, dummy_input=None):
             - op_types : list of string
                 types of nn.module you want to apply quantization, eg. 'Conv2d'
         - dummy_input : tuple of tensor
-            inputs to the model, which are used to get the graph of the module
+            inputs to the model, which are used to trace the graph of the module. The graph is used to
+            find Conv-BN patterns so that batch normalization folding can be enabled. If dummy_input is
+            not given, batch normalization folding is disabled.
     """
     super().__init__(model, config_list, optimizer, dummy_input)
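
For reference, below is a minimal usage sketch of the behavior this docstring describes: passing dummy_input to QAT_Quantizer so the graph can be traced, the Conv-BN pattern found, and batch normalization folding enabled. The import path and constructor signature come from the patched file itself; the model, the config values, and the input shape are illustrative assumptions, not part of this change.

    import torch
    import torch.nn.functional as F
    from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer

    class Net(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
            self.bn = torch.nn.BatchNorm2d(8)  # Conv-BN pattern, eligible for folding

        def forward(self, x):
            return F.relu(self.bn(self.conv(x)))

    model = Net()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Illustrative config: quantize Conv2d weights and outputs to 8 bits.
    config_list = [{
        'quant_types': ['weight', 'output'],
        'quant_bits': {'weight': 8, 'output': 8},
        'op_types': ['Conv2d'],
    }]

    # With dummy_input, the quantizer traces the graph, finds the Conv-BN
    # pair, and folds the BatchNorm; without it, BN folding is disabled.
    dummy_input = (torch.randn(1, 3, 32, 32),)
    quantizer = QAT_Quantizer(model, config_list, optimizer, dummy_input=dummy_input)
    quantizer.compress()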