Skip to content

Commit

Permalink
fixup
Browse files Browse the repository at this point in the history
  • Loading branch information
yunjing.lh committed May 17, 2020
1 parent 94271d4 commit ad3a380
Show file tree
Hide file tree
Showing 6 changed files with 22 additions and 26 deletions.
8 changes: 4 additions & 4 deletions python/tvm/relay/frontend/tflite.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ def get_tensors(self, tensors_idx_list):
# Check that the scale and zero points are valid.
if scale != 0 or zero_point != 0:
qnn_params = dict()
qnn_params['scale'] = relay.const(scale, 'float64')
qnn_params['scale'] = relay.const(scale, 'float32')
qnn_params['zero_point'] = relay.const(zero_point, 'int32')
return_list.append(TensorWrapper(tensor_idx, tensor, buffer, qnn_params))
return return_list
Expand Down Expand Up @@ -1463,7 +1463,7 @@ def convert_fully_connected(self, op):
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale_val = get_scalar_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, 'float64')
new_input_scale = relay.const(new_input_scale_val, 'float32')
new_input_zero_point = relay.const(0, 'int32')

# Requantize
Expand Down Expand Up @@ -1679,7 +1679,7 @@ def convert_conv(self, op, conv_type):
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale_val = get_scalar_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, 'float64')
new_input_scale = relay.const(new_input_scale_val, 'float32')
new_input_zero_point = relay.const(0, 'int32')

# Finally requantize
Expand Down Expand Up @@ -2479,7 +2479,7 @@ def get_scalar_from_constant(expr):
assert value.dtype == np.dtype(np.int32) or \
value.dtype == np.dtype(np.float32) or \
value.dtype == np.dtype(np.float64), \
"value must be float32/int32"
"value must be float32/float64/int32"
return np.asscalar(value)


Expand Down
8 changes: 4 additions & 4 deletions src/relay/qnn/op/add.cc
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,9 @@ Expr QnnAddCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
QnnBinaryOpTensorType input_type(arg_types, 0);

if (rounding == "TFLITE") {
double lhs_scale_val = GetScalarFromConstant<double>(args.lhs_scale);
double rhs_scale_val = GetScalarFromConstant<double>(args.rhs_scale);
double out_scale_val = GetScalarFromConstant<double>(args.output_scale);
double lhs_scale_val = GetScalarFromConstant<float>(args.lhs_scale);
double rhs_scale_val = GetScalarFromConstant<float>(args.rhs_scale);
double out_scale_val = GetScalarFromConstant<float>(args.output_scale);
double twice_max_input_scale = 2 * std::max(lhs_scale_val, rhs_scale_val);
double real_lhs_scale_val = lhs_scale_val / twice_max_input_scale;
double real_rhs_scale_val = rhs_scale_val / twice_max_input_scale;
Expand All @@ -67,7 +67,7 @@ Expr QnnAddCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
DataType::Float(64), real_rhs_scale_val);
auto real_out_scale = MakeConstantScalar<double>(
DataType::Float(64), real_out_scale_val);
auto one_scalar = MakeConstantScalar<float>(
auto one_scalar = MakeConstantScalar<double>(
DataType::Float(64), 1);
auto zero_scalar = MakeConstantScalar<int>(
DataType::Int(32), 0);
Expand Down
9 changes: 4 additions & 5 deletions src/relay/qnn/op/op_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -203,10 +203,10 @@ static inline bool QnnBroadcastRel(const Array<Type>& types, int num_inputs, con
*
* \param OpName the name of registry.
*/
#define QNN_REGISTER_BINARY_OP_WITH_BODY(OpName, Body) \
TVM_REGISTER_GLOBAL("relay.qnn.op._make." OpName) \
.set_body_typed(Body); \
RELAY_REGISTER_OP("qnn." OpName) \
#define QNN_REGISTER_BINARY_OP_WITH_BODY(OpName, Body) \
TVM_REGISTER_GLOBAL("relay.qnn.op._make." OpName) \
.set_body_typed(Body); \
RELAY_REGISTER_OP("qnn." OpName) \
.set_num_inputs(kNumQnnBinaryOpInputs) \
.add_argument("lhs", "Tensor", "The left hand side quantized tensor.") \
.add_argument("rhs", "Tensor", "The right hand side quantized tensor.") \
Expand All @@ -219,7 +219,6 @@ static inline bool QnnBroadcastRel(const Array<Type>& types, int num_inputs, con
.add_type_rel("QnnBroadcast", QnnBroadcastRel) \
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", QnnBinaryBroadcastLayout)


#define QNN_REGISTER_BINARY_OP(OpName) \
auto DefaultBody = [](Expr lhs, Expr rhs, Expr lhs_scale, Expr lhs_zero_point, Expr rhs_scale, \
Expr rhs_zero_point, Expr output_scale, Expr output_zero_point) { \
Expand Down
1 change: 0 additions & 1 deletion src/relay/qnn/util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,6 @@ Expr FixedPointMultiplyPerChannel(Expr tensor, std::vector<double> multipliers,
lshift_required |= (lshift != 0);
rshift_required |= (rshift != 0);
possible_to_overflow |= (fixed_pt_multiplier == std::numeric_limits<int32_t>::min());
printf("left shift %d, right shift %d\n", lshift, rshift);
}

// 2) Multiply the integer multiplier. Convert lefts shifts into expr and multiply.
Expand Down
12 changes: 4 additions & 8 deletions tests/python/frontend/tflite/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def get_real_image_object_detection(im_height, im_width):
return data

def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target='llvm',
out_names=None):
out_names=None, opt_level=3):
""" Generic function to compile on relay and execute on tvm """
# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
try:
Expand All @@ -109,7 +109,7 @@ def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target
shape_dict=shape_dict,
dtype_dict=dtype_dict)

with relay.build_config(opt_level=3):
with relay.build_config(opt_level=opt_level):
graph, lib, params = relay.build(mod, target, params=params)

ctx = tvm.context(target, 0)
Expand Down Expand Up @@ -2004,7 +2004,6 @@ def test_forward_qnn_mobilenet_v1_net():
tflite_predictions = np.squeeze(tflite_output).astype('int32')
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm_predictions = np.squeeze(tvm_output).astype('int32')
print("diff", np.sum(np.abs(tvm_predictions - tflite_predictions)))
tvm.testing.assert_allclose(tvm_predictions, tflite_predictions,
rtol=0, atol=0)

Expand All @@ -2017,15 +2016,14 @@ def test_forward_qnn_mobilenet_v2_net():
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()

np.random.seed(0)
np.random.seed(43)
# TODO: np.random.seed(43) setting py3
data = np.random.randint(256, size=(1, 224, 224, 3)).astype('uint8')

tflite_output = run_tflite_graph(tflite_model_buf, data)
tflite_predictions = np.squeeze(tflite_output).astype('int32')
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input', opt_level=2)
tvm_predictions = np.squeeze(tvm_output).astype('int32')
print("diff", np.sum(np.abs(tvm_predictions - tflite_predictions)))
tvm.testing.assert_allclose(tvm_predictions, tflite_predictions,
rtol=0, atol=0)

Expand Down Expand Up @@ -2179,8 +2177,6 @@ def test_forward_mediapipe_hand_landmark():
# Main
# ----
if __name__ == '__main__':
test_forward_qnn_mobilenet_v1_net()
exit()
# BatchToSpaceND
test_forward_batch_to_space_nd()

Expand Down
10 changes: 6 additions & 4 deletions topi/include/topi/nn/pooling.h
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,9 @@ inline Tensor pool_impl(const Tensor& x, const Array<PrimExpr>& kernel_size,
PrimExpr w_end = tir::MinNode::make(w_start + kernel_width, width);
h_start = tir::MaxNode::make(h_start, make_const(DataType::DataType::Int(32), 0));
w_start = tir::MaxNode::make(w_start, make_const(DataType::DataType::Int(32), 0));
PrimExpr divide_factor = tir::MaxNode::make((h_end - h_start) * (w_end - w_start),
make_const(DataType::DataType::Int(32), 1));
PrimExpr divide_factor = tir::MaxNode::make(
(h_end - h_start) * (w_end - w_start),
make_const(DataType::DataType::Int(32), 1));
PrimExpr up_rounder = floordiv(divide_factor, 2);
return floordiv(pool_sum(indices) + up_rounder, divide_factor);
}
Expand All @@ -184,8 +185,9 @@ inline Tensor pool_impl(const Tensor& x, const Array<PrimExpr>& kernel_size,
PrimExpr w_end = tir::MinNode::make(w_start + kernel_width, width);
h_start = tir::MaxNode::make(h_start, make_const(DataType::DataType::Int(32), 0));
w_start = tir::MaxNode::make(w_start, make_const(DataType::DataType::Int(32), 0));
PrimExpr divide_factor = tir::MaxNode::make((h_end - h_start) * (w_end - w_start),
make_const(DataType::DataType::Int(32), 1));
PrimExpr divide_factor = tir::MaxNode::make(
(h_end - h_start) * (w_end - w_start),
make_const(DataType::DataType::Int(32), 1));
return div(pool_sum(indices), divide_factor);
}
}, "tensor", kElementWise);
Expand Down

0 comments on commit ad3a380

Please sign in to comment.