
Commit

fix minor misspelling (#8476)
Co-authored-by: Masahiro Hiramori <[email protected]>
mshr-h authored Jul 16, 2021
1 parent c263f22 commit 2b57907
Showing 12 changed files with 17 additions and 17 deletions.
4 changes: 2 additions & 2 deletions python/tvm/relay/frontend/common.py
@@ -263,7 +263,7 @@ def get_relay_op(op_name):
The Relay operator name.
"""
if "." in op_name:
- # explicit hierachical modules
+ # explicit hierarchical modules
op = _op
try:
for opn in op_name.split("."):
@@ -554,7 +554,7 @@ def infer_value(input_val, params, mod=None):


def infer_value_simulated(input_val, params):
"""Extention to infer_value that can be used when some input
"""Extension to infer_value that can be used when some input
values are missing. This function creates dummy inputs with the same
shape and random values then calls infer_value. This is helpful when
implementing certain onnx operators where we need to evaluate the graph
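For context, a hedged sketch of how a frontend might call the helper documented in this hunk; the expression and params here are hypothetical, and the asnumpy() call is an assumption about the NDArray API of this era.

```python
# Illustrative use of infer_value_simulated (not from this commit): evaluate
# a Relay subexpression even when `params` lacks bindings for some inputs;
# the helper fills the gaps with random dummies of the right shape.
from tvm.relay.frontend.common import infer_value_simulated

def try_constant_fold(expr, params):
    # `expr` is a Relay expression (hypothetical); returns a NumPy value.
    return infer_value_simulated(expr, params).asnumpy()
```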
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/onnx.py
@@ -2743,7 +2743,7 @@ def get_var(name, val, scan=False):

num_scan_outputs = len(body.output) - (1 + num_deps)

- # Construct variables and intial empty tensors for any scan outputs.
+ # Construct variables and initial empty tensors for any scan outputs.
# To do this, we'll figure out the output shapes of the body subgraph by importing
# it and doing type inference.
scan_output_vars = []
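As a hedged sketch of the shape-probing step the fixed comment refers to, importing the loop body and running type inference might look roughly like this (module wrapping details assumed):

```python
# Sketch only: learn the loop body's output types via Relay type inference,
# so empty scan-output tensors can be allocated with matching shapes.
import tvm
from tvm import relay

def body_output_types(body_func):
    mod = tvm.IRModule.from_expr(body_func)  # wrap the imported subgraph
    mod = relay.transform.InferType()(mod)   # fill in checked_type fields
    return mod["main"].ret_type              # types/shapes of the body outputs
```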
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/pytorch_utils.py
@@ -90,7 +90,7 @@ def batched_nms(boxes, scores, idxs, iou_threshold):
"""
one = is_constant()

- # Equivelent PyTorch code from above snippet
+ # Equivalent PyTorch code from above snippet
# offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
cast = is_op("cast")(idxs)
mx = is_op("max")(boxes)
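The corrected comment quotes the offsets line from torchvision's batched NMS. For reference, a sketch of the full trick being pattern-matched (mirroring torchvision.ops.batched_nms, not TVM code):

```python
# Boxes of different classes are shifted into disjoint coordinate ranges so
# a single class-agnostic NMS call never suppresses across classes.
import torch
import torchvision

def batched_nms_reference(boxes, scores, idxs, iou_threshold):
    max_coordinate = boxes.max()
    offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
    boxes_for_nms = boxes + offsets[:, None]
    return torchvision.ops.nms(boxes_for_nms, scores, iou_threshold)
```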
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/qnn_torch.py
@@ -163,7 +163,7 @@ def _get_quant_param_for_input(input_value):
"""
We want to know the input scale and zp of this input_value, since
input quant params are not explicitly passed around in torch (they
- are embeded in a QTensor data structure, not visible statically).
+ are embedded in a QTensor data structure, not visible statically).
We know that it is quantized using output scale and zp
of some previous quantized op. The purpose of this function
is to find that pair of parameters.
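A loose sketch of the lookup idea this docstring describes: trace the value back to its producer and reuse that op's output quant params. The output_params map and the node-walking details are assumptions for illustration, not the module's actual code.

```python
# Hypothetical illustration: input_value is a TorchScript Value; walk back to
# the node that produced it and reuse that node's output (scale, zero_point).
def find_input_quant_param(input_value, output_params):
    producer = input_value.node()       # torch._C.Node producing the value
    if producer in output_params:
        return output_params[producer]  # (scale, zero_point) recorded earlier
    # Otherwise keep walking through quant-param-preserving ops (assumed).
    return find_input_quant_param(next(producer.inputs()), output_params)
```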
4 changes: 2 additions & 2 deletions python/tvm/relay/frontend/tensorflow.py
@@ -117,7 +117,7 @@ def _in_while_loop(control_flow_node_map, op_name):
Parameters
----------
control_flow_node_map : Dict[str, Set[str]]
- A dictionay contains the unique control flow execution frame name to
+ A dictionary contains the unique control flow execution frame name to
a set of primitive operators mapping.
op_name : str
@@ -139,7 +139,7 @@ class RewriteSubgraph(ExprMutator):
Parameters
----------
rewrite_map : Dict[expr, expr]
- A dictionay contains a set of expr to var mapping.
+ A dictionary contains a set of expr to var mapping.
"""

def __init__(self, rewrite_map):
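For readers unfamiliar with the pattern, a minimal sketch of an ExprMutator driven by such a rewrite_map (illustrative, close in spirit to the class above):

```python
# Visit every sub-expression; when one appears as a key in rewrite_map,
# substitute the mapped expression, otherwise recurse as usual.
from tvm.relay.expr_functor import ExprMutator

class RewriteSubgraphSketch(ExprMutator):
    def __init__(self, rewrite_map):
        super().__init__()
        self.rewrite_map = rewrite_map

    def visit(self, expr):
        if expr in self.rewrite_map:
            return self.rewrite_map[expr]
        return super().visit(expr)
```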
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/tensorflow2.py
@@ -512,7 +512,7 @@ def _convert_function(
Examples
--------
- a tf function "x+1", is implemented as a subgraph in the libary section of the graph.
+ a tf function "x+1", is implemented as a subgraph in the library section of the graph.
this subgraph is converted to a relay function such as
fn (%x: float32) {
add(%x, 1f) /* Identity */
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/tflite.py
@@ -3471,7 +3471,7 @@ def _get_flattened_index(indices, shape):
indices_list = []

# Below function iterates through each applicable indices per dimension
- # based on format type specified and finaly produce the dense matrix and the NZ indices.
+ # based on format type specified and finally produce the dense matrix and the NZ indices.
def _def_prepare_dense_matrix_from_sparse(indices, level, prev_idx):
if level == len(indices):
start_pos = 0
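The helper named in this hunk computes a row-major offset from per-dimension indices; a hedged standalone equivalent:

```python
# Standard row-major flattening (illustrative, not the module's exact code):
# walk the dimensions, scaling the running offset by each dimension's extent.
def flattened_index(indices, shape):
    flat = 0
    for idx, dim in zip(indices, shape):
        flat = flat * dim + idx
    return flat

assert flattened_index((1, 2), (3, 4)) == 6  # row 1, col 2 in a 3x4 matrix
```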
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/tflite_flexbuffer.py
@@ -88,7 +88,7 @@ def indirect_jump(self, offset, byte_width):

def decode_keys(self, end, size, byte_width):
"""Decodes the flexbuffer type vector. Map keys are stored in this form"""
- # Keys are strings here. The format is all strings seperated by null, followed by back
+ # Keys are strings here. The format is all strings separated by null, followed by back
# offsets for each of the string. For example, (str1)\0(str1)\0(offset1)(offset2) The end
# pointer is pointing at the end of all strings
keys = list()
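A rough sketch of decoding the layout the corrected comment describes: null-terminated key strings followed by one back-offset per key. The offset semantics (measured backwards from the offset's own position, as in the FlexBuffers convention) are an assumption here.

```python
# Illustrative only: read `size` little-endian back-offsets starting at `end`,
# then recover each null-terminated key string the offset points back to.
def decode_keys_sketch(buf, end, size, byte_width):
    keys = []
    for i in range(size):
        off_pos = end + i * byte_width
        back = int.from_bytes(buf[off_pos:off_pos + byte_width], "little")
        start = off_pos - back  # assumed: offset points backwards
        keys.append(buf[start:buf.index(b"\x00", start)].decode("utf-8"))
    return keys
```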
6 changes: 3 additions & 3 deletions tests/python/frontend/onnx/test_forward.py
@@ -376,7 +376,7 @@ def verify_depth_to_space(inshape, outshape, mode, blockSize):
@tvm.testing.uses_gpu
def test_depth_to_space():
# current onnx.checker use OpSet-1 version of DepthToSpace, which doesn't have a mode argument.
- # TO-DO, we can add mode arguement to test CRD mode and DCR mode
+ # TO-DO, we can add mode argument to test CRD mode and DCR mode
# in the future when we update to a newer onnx version.
verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2)

@@ -2490,7 +2490,7 @@ def repeat(N, D):
repeat(1, D),
repeat(1, D),
)
- # Convolution with assymetric padding
+ # Convolution with asymmetric padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
@@ -4756,7 +4756,7 @@ def repeat(N, D):
bias=True,
)

- # Convolution with assymetric padding
+ # Convolution with asymmetric padding
verify_qlinearconv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
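Both "assymetric" fixes concern convolutions whose padding differs before and after a spatial axis; the standard output-size arithmetic, for reference (dilation omitted, illustrative only):

```python
# Per ONNX Conv semantics, each spatial dim pads independently at both ends:
# out = (in + pad_begin + pad_end - kernel) // stride + 1
def conv_out_dim(in_dim, kernel, pad_begin, pad_end, stride=1):
    return (in_dim + pad_begin + pad_end - kernel) // stride + 1

assert conv_out_dim(5, 3, 0, 1) == 4  # padding one side only: asymmetric
```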
2 changes: 1 addition & 1 deletion tests/python/frontend/tflite/test_forward.py
@@ -4412,7 +4412,7 @@ def test_forward_mediapipe_hand_landmark():
# --------------
def test_prevent_tensorflow_dynamic_range():
"""
- Should prevent runnung "dynamic range quantization" optimized TFLite graph
+ Should prevent running "dynamic range quantization" optimized TFLite graph
"""
data_array = np.random.randint(0, 2, (1, 1024, 1024)).astype(dtype=np.float32)
filter_array = np.random.randint(0, 2, (1024, 1024)).astype(dtype=np.float32)
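For context on the term in the fixed docstring: "dynamic range quantization" is the default post-training optimization in the TFLite converter. A hedged sketch of how such a model is typically produced (toy model assumed):

```python
# Weights are quantized to int8 ahead of time while activations stay float
# and are quantized on the fly; the test above checks that TVM's TFLite
# frontend refuses to run such graphs.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(8, input_shape=(16,))])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # dynamic range quant
tflite_model = converter.convert()
```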
2 changes: 1 addition & 1 deletion tutorials/frontend/from_mxnet.py
@@ -32,7 +32,7 @@
pip install mxnet --user
- or please refer to offical installation guide.
+ or please refer to official installation guide.
https://mxnet.apache.org/versions/master/install/index.html
"""
# some standard imports
4 changes: 2 additions & 2 deletions tutorials/frontend/from_onnx.py
@@ -29,7 +29,7 @@
pip install onnx --user
- or please refer to offical site.
+ or please refer to official site.
https://github.com/onnx/onnx
"""
import onnx
@@ -122,7 +122,7 @@
# Notes
# ---------------------------------------------
# By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
- # retains that dynamism upon import, and the compiler attemps to convert the model
+ # retains that dynamism upon import, and the compiler attempts to convert the model
# into a static shapes at compile time. If this fails, there may still be dynamic
# operations in the model. Not all TVM kernels currently support dynamic shapes,
# please file an issue on discuss.tvm.apache.org if you hit an error with dynamic kernels.
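As the note says, supplying concrete input shapes at import time helps the compiler reach static shapes; a short sketch (model path, input name, and shape are assumed):

```python
# Pin the input shape so the importer can resolve ONNX's dynamic dims.
import onnx
from tvm import relay

onnx_model = onnx.load("model.onnx")      # hypothetical path
shape_dict = {"input": (1, 3, 224, 224)}  # assumed input name and shape
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
```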
