quantize_test.py — forked from Solidmatrix/ICNet-tensorflow (78 lines, 65 loc, 3.78 KB).
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow.contrib.quantize.python import quantize_graph as qg
#from tensorflow.contrib.quantize.python.quantize_graph import *
# Select which tf.contrib.quantize graph rewrite to apply below:
# True  -> create_training_graph (fake-quant ops with moving-average ranges)
# False -> create_eval_graph (inference-time fake-quant ops)
is_training = True
#with tf.Graph().as_default() as g:
#x_data = np.float32(np.random.rand(2, 100))
# Fixed (2, 100) float32 input matrix. The values were sampled once from
# np.random.rand (see the commented-out line above) and then hard-coded so
# the run is exactly reproducible across executions.
x_data = np.float32([[0.0104181, 0.39809936, 0.43572345, 0.2943143, 0.38499156, 0.02861821
, 0.97221327, 0.36802506, 0.43789643, 0.654943, 0.65983623, 0.40043506
, 0.472009, 0.5645227, 0.14715879 , 0.23031984 , 0.27041692 , 0.5801626
, 0.12029732 , 0.3060731 , 0.9838772 , 0.04934854 , 0.81442094 , 0.34786654
, 0.942846 , 0.78189206 , 0.14741631 , 0.0361041 , 0.65419126 , 0.7152261
, 0.96920514 , 0.7880782 , 0.5660122 , 0.16167518 , 0.99573666 , 0.39428422
, 0.22880511 , 0.30737662 , 0.7649828 , 0.8349374 , 0.5875863 , 0.97451746
, 0.61555564 , 0.11517917 , 0.47117528 , 0.82677776 , 0.5918615 , 0.61087364
, 0.77807033 , 0.6654518 , 0.13982849 , 0.852797 , 0.42348912 , 0.46406695
, 0.502599 , 0.07627752 , 0.99134535 , 0.22222959 , 0.5783699 , 0.6942041
, 0.13930264 , 0.8101787 , 0.6786315 , 0.07125281 , 0.22037806 , 0.2799568
, 0.40378135 , 0.63249195 , 0.48441243 , 0.94374853 , 0.15050228 , 0.8604159
, 0.82005596 , 0.23836254 , 0.0937671 , 0.6016079 , 0.62174034 , 0.82517695
, 0.28909504 , 0.77553594 , 0.24163431 , 0.4657075 , 0.05188533 , 0.00469344
, 0.31749177 , 0.6556095 , 0.09584785 , 0.11537983 , 0.5412073 , 0.26197425
, 0.3019014 , 0.37252468 , 0.18267629 , 0.12312322 , 0.79371595 , 0.546067
, 0.34763908 , 0.7923618 , 0.71905965 , 0.9673176 ],
[0.6810253 , 0.88509405 , 0.39243641 , 0.7597148 , 0.6895376 , 0.1237887
, 0.804025 , 0.978907 , 0.84076303 , 0.15616152 , 0.4308278 , 0.8629428
, 0.96976775 , 0.25166002 , 0.34862873 , 0.88608164 , 0.959888 , 0.19373144
, 0.57269984 , 0.24343343 , 0.8427142 , 0.62103266 , 0.31434396 , 0.8897581
, 0.9737858 , 0.6321305 , 0.5153783 , 0.48592773 , 0.7373377 , 0.4108702
, 0.22031474 , 0.9342591 , 0.15947297 , 0.04270444 , 0.48751128 , 0.52215564
, 0.17954601 , 0.84551716 , 0.19277337 , 0.8467457 , 0.294107 , 0.86547756
, 0.04538257 , 0.31516784 , 0.3384548 , 0.54437655 , 0.21853337 , 0.41516617
, 0.25656983 , 0.94309145 , 0.6020146 , 0.923418 , 0.30095518 , 0.43942562
, 0.22925907 , 0.9671406 , 0.9651851 , 0.28481802 , 0.04429587 , 0.7382945
, 0.85717845 , 0.5999186 , 0.5626581 , 0.35741302 , 0.23685887 , 0.980251
, 0.44124535 , 0.8675542 , 0.74453914 , 0.31686208 , 0.3366169 , 0.44106483
, 0.4648249 , 0.29000592 , 0.01015256 , 0.5779951 , 0.10447361 , 0.70226246
, 0.7000185 , 0.30994985 , 0.19382262 , 0.57248265 , 0.56509084 , 0.9681785
, 0.7286736 , 0.5407405 , 0.85787666 , 0.52721465 , 0.72527796 , 0.8940573
, 0.6965924 , 0.23125887 , 0.7431688 , 0.2720095 , 0.5242759 , 0.29013076
, 0.5817556 , 0.0495354 , 0.08594406 , 0.42320013]])
# Synthetic regression targets: y = [0.100, 0.200] . x + 0.300, so training
# should drive W toward [0.100, 0.200] and b toward 0.300.
y_data = np.dot([0.100, 0.200], x_data) + 0.300

# Linear model y = W x + b, both parameters initialised to zero.
b = tf.Variable(tf.zeros([1]))
#W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
W = tf.Variable(tf.zeros([1, 2]))
y = tf.matmul(W, x_data) + b

# Mean-squared-error loss, minimised with plain SGD (lr = 0.5).
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Rewrite the default graph in place with fake-quantization ops
# (tf.contrib.quantize); training vs. eval rewrite is chosen by is_training.
g = tf.get_default_graph()
if is_training:
    qg.create_training_graph(g)
else:
    qg.create_eval_graph(g)

with tf.Session(graph=g) as sess:
    # Fix: the original created a second tf.Session() here, shadowing the
    # context-managed session (which then went unused) and leaking the new
    # one.  All ops now run in the managed session, which is closed on exit.
    # Fix: tf.initialize_all_variables() is deprecated since TF 0.12;
    # tf.global_variables_initializer() is the supported equivalent.
    sess.run(tf.global_variables_initializer())
    # Fix: xrange is Python 2 only; range keeps the script Python 3 compatible.
    for step in range(0, 201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run(W), sess.run(b))