This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

update test
antinucleon committed Sep 7, 2015
1 parent 5f4fd55 commit 089cce4
Showing 8 changed files with 68 additions and 69 deletions.
2 changes: 1 addition & 1 deletion dmlc-core
Empty file removed doc/user-guide/executor.md
Empty file removed doc/user-guide/symbol.md
34 changes: 17 additions & 17 deletions tests/python/test_bind.py
@@ -16,12 +16,11 @@ def check_bind_with_uniform(uf, gf, dim):
     rhs = mx.symbol.Variable('rhs')
     ret = uf(lhs, rhs)
     assert ret.list_arguments() == ['lhs', 'rhs']
-    lhs_arr = mx.narray.create(shape)
-    rhs_arr = mx.narray.create(shape)
-    lhs_grad = mx.narray.create(shape)
-    rhs_grad = mx.narray.create(shape)
-    lhs_arr.numpy[:] = np.random.uniform(-10, 10, shape)
-    rhs_arr.numpy[:] = np.random.uniform(-10, 10, shape)
+    lhs_arr = mx.narray.array(np.random.uniform(-10, 10, shape))
+    rhs_arr = mx.narray.array(np.random.uniform(-10, 10, shape))
+    lhs_grad = mx.narray.empty(shape)
+    rhs_grad = mx.narray.empty(shape)
+
 
     executor = ret.bind(mx.Context('cpu'),
                         args=[lhs_arr, rhs_arr],
@@ -41,22 +40,21 @@ def check_bind_with_uniform(uf, gf, dim):
     executor.forward()
     exec3.forward()
     exec4.forward()
-    out2 = executor.heads()[0].numpy
-    out1 = uf(lhs_arr.numpy, rhs_arr.numpy)
-    out3 = exec3.heads()[0].numpy
-    out4 = exec4.heads()[0].numpy
+    out2 = executor.heads()[0].asnumpy()
+    out1 = uf(lhs_arr.asnumpy(), rhs_arr.asnumpy())
+    out3 = exec3.heads()[0].asnumpy()
+    out4 = exec4.heads()[0].asnumpy()
     assert reldiff(out1, out2) < 1e-6
     assert reldiff(out1, out3) < 1e-6
     assert reldiff(out1, out4) < 1e-6
     # test gradient
-    out_grad = mx.narray.create(shape)
-    out_grad.numpy[:] = np.ones(shape)
-    lhs_grad2, rhs_grad2 = gf(out_grad.numpy,
-                              lhs_arr.numpy,
-                              rhs_arr.numpy)
+    out_grad = mx.narray.array(np.ones(shape))
+    lhs_grad2, rhs_grad2 = gf(out_grad.asnumpy(),
+                              lhs_arr.asnumpy(),
+                              rhs_arr.asnumpy())
     executor.backward([out_grad])
-    assert reldiff(lhs_grad.numpy, lhs_grad2) < 1e-6
-    assert reldiff(rhs_grad.numpy, rhs_grad2) < 1e-6
+    assert reldiff(lhs_grad.asnumpy(), lhs_grad2) < 1e-6
+    assert reldiff(rhs_grad.asnumpy(), rhs_grad2) < 1e-6
 
 
 def test_bind():
@@ -79,3 +77,5 @@ def test_bind():
                                     dim)
 
 
+if __name__ == "__main__":
+    test_bind()
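
Across all five test files the substance of the change is one API migration: mx.narray.create(shape) plus writes through the mutable .numpy view give way to mx.narray.array(data) / mx.narray.empty(shape) plus explicit .asnumpy() copies. Below is a minimal end-to-end sketch of the new style, assembled only from calls that appear in this diff; it is illustrative rather than code from the commit, and assumes the mxnet Python API of this period:

    import numpy as np
    import mxnet as mx

    shape = (4, 4)
    lhs = mx.symbol.Variable('lhs')
    rhs = mx.symbol.Variable('rhs')
    net = mx.symbol.ElementWiseSum(lhs, rhs, name='esum')

    # New style: build an NArray from data, or allocate an empty buffer.
    lhs_arr = mx.narray.array(np.random.uniform(-10, 10, shape))
    rhs_arr = mx.narray.array(np.random.uniform(-10, 10, shape))
    lhs_grad = mx.narray.empty(shape)
    rhs_grad = mx.narray.empty(shape)

    executor = net.bind(mx.Context('cpu'),
                        args=[lhs_arr, rhs_arr],
                        args_grad=[lhs_grad, rhs_grad])
    executor.forward()
    # asnumpy() returns an explicit copy, replacing the old .numpy view.
    out = executor.heads()[0].asnumpy()
    assert np.allclose(out, lhs_arr.asnumpy() + rhs_arr.asnumpy())

    # For an elementwise sum, each input gradient equals the head gradient.
    executor.backward([mx.narray.array(np.ones(shape))])
    assert np.allclose(lhs_grad.asnumpy(), np.ones(shape))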
34 changes: 16 additions & 18 deletions tests/python/test_conv.py
@@ -32,30 +32,30 @@ def CalAcc(out, label):
 
 data_shape = (batch_size, 1, 28, 28)
 arg_shapes, out_shapes, aux_shapes = softmax.infer_shape(data=data_shape)
-arg_narrays = [mx.narray.create(shape) for shape in arg_shapes]
-grad_narrays = [mx.narray.create(shape) for shape in arg_shapes]
-aux_narrays = [mx.narray.create(shape) for shape in aux_shapes]
+arg_narrays = [mx.narray.empty(shape) for shape in arg_shapes]
+grad_narrays = [mx.narray.empty(shape) for shape in arg_shapes]
+aux_narrays = [mx.narray.empty(shape) for shape in aux_shapes]
 
 inputs = dict(zip(args_list, arg_narrays))
 np.random.seed(0)
 # set random weight
 for name, narray in inputs.items():
     if "weight" in name:
-        narray.numpy[:] = np.random.uniform(-0.07, 0.07, narray.numpy.shape)
+        narray[:] = np.random.uniform(-0.07, 0.07, narray.shape)
     if "bias" in name:
-        narray.numpy[:] = 0.0
+        narray[:] = 0.0
     if "gamma" in name:
-        narray.numpy[:] = 1.0
+        narray[:] = 1.0
     if "beta" in name:
-        narray.numpy[:] = 0.0
+        narray[:] = 0.0
 
 # bind executor
 # TODO(bing): think of a better bind interface
 executor = softmax.bind(mx.Context('cpu'), arg_narrays, grad_narrays, 'write', aux_narrays)
 # update
 
 out_narray = executor.heads()[0]
-grad_narray = mx.narray.create(out_narray.shape)
+grad_narray = mx.narray.empty(out_narray.shape)
 
 epoch = 1
 momentum = 0.9
@@ -90,26 +90,24 @@ def test_mnist():
     train_nbatch = 0
     val_nbatch = 0
     for data, label in train_dataiter:
-        data = data.numpy
-        label = label.numpy.flatten()
-        inputs["data"].numpy[:] = data
-        inputs["sm_label"].numpy[:] = label
+        label = label.asnumpy().flatten()
+        inputs["data"][:] = data
+        inputs["sm_label"][:] = label
         executor.forward(is_train = True)
-        train_acc += CalAcc(out_narray.numpy, label)
+        train_acc += CalAcc(out_narray.asnumpy(), label)
         train_nbatch += 1
-        grad_narray.numpy[:] = out_narray.numpy
+        grad_narray[:] = out_narray
         executor.backward([grad_narray])
 
         for grad, weight in block:
             Update(grad, weight)
 
     # evaluate
     for data, label in val_dataiter:
-        data = data.numpy
-        label = label.numpy.flatten()
-        inputs["data"].numpy[:] = data
+        label = label.asnumpy().flatten()
+        inputs["data"][:] = data
         executor.forward(is_train = False)
-        val_acc += CalAcc(out_narray.numpy, label)
+        val_acc += CalAcc(out_narray.asnumpy(), label)
         val_nbatch += 1
     print("Train Acc: ", train_acc / train_nbatch)
     print("Valid Acc: ", val_acc / val_nbatch)
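
The training loop above calls Update(grad, weight) and sets momentum = 0.9, but the helper's body and the learning rate sit outside the hunks shown. A plausible SGD-with-momentum step in the same NArray idiom; lr, batch_size, and the states table are assumptions for illustration, not the commit's code:

    import numpy as np

    lr = 0.1          # assumed learning rate
    batch_size = 100  # assumed; the real value is set where data_shape is built
    momentum = 0.9    # matches the value in the diff
    states = {}       # hypothetical per-weight velocity buffers

    def Update(grad, weight):
        # v <- momentum * v - (lr / batch_size) * grad; then w <- w + v
        key = id(weight)
        if key not in states:
            states[key] = np.zeros(weight.shape)
        v = states[key]
        v[:] = momentum * v - (lr / batch_size) * grad.asnumpy()
        weight[:] = weight.asnumpy() + v

The 'write' argument in the bind call above asks the executor to overwrite grad_narrays on every backward pass rather than accumulate into them, so a helper like this sees fresh gradients each batch.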
7 changes: 5 additions & 2 deletions tests/python/test_io.py
@@ -30,14 +30,14 @@ def test_MNISTIter():
     # test_reset
     train_dataiter.reset()
     train_dataiter.iter_next()
-    label_0 = train_dataiter.getlabel().numpy.flatten()
+    label_0 = train_dataiter.getlabel().asnumpy().flatten()
     train_dataiter.iter_next()
     train_dataiter.iter_next()
     train_dataiter.iter_next()
     train_dataiter.iter_next()
     train_dataiter.reset()
     train_dataiter.iter_next()
-    label_1 = train_dataiter.getlabel().numpy.flatten()
+    label_1 = train_dataiter.getlabel().asnumpy().flatten()
     assert(sum(label_0 - label_1) == 0)
 
 '''
@@ -102,3 +102,6 @@ def test_Cifar10Rec():
     for i in range(10):
         assert(labelcount[i] == 1000)
 '''
+
+if __name__ == "__main__":
+    test_MNISTIter()
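
The reset test asserts that rewinding the iterator reproduces the labels of the first batch. The same check as a reusable helper, written only against the iterator calls the diff exercises (reset, iter_next, getlabel); the helper name is ours:

    def check_deterministic_reset(dataiter, nskip=4):
        dataiter.reset()
        dataiter.iter_next()
        first = dataiter.getlabel().asnumpy().flatten()
        for _ in range(nskip):   # advance a few batches
            dataiter.iter_next()
        dataiter.reset()
        dataiter.iter_next()
        again = dataiter.getlabel().asnumpy().flatten()
        # Elementwise equality is stricter than sum(first - again) == 0,
        # which would also pass if positive and negative errors cancelled.
        assert (first == again).all()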
28 changes: 13 additions & 15 deletions tests/python/test_mlp.py
@@ -23,24 +23,24 @@ def CalAcc(out, label):
 # infer shape
 data_shape = (batch_size, 784)
 arg_shapes, out_shapes, aux_shapes = softmax.infer_shape(data=data_shape)
-arg_narrays = [mx.narray.create(shape) for shape in arg_shapes]
-grad_narrays = [mx.narray.create(shape) for shape in arg_shapes]
+arg_narrays = [mx.narray.empty(shape) for shape in arg_shapes]
+grad_narrays = [mx.narray.empty(shape) for shape in arg_shapes]
 inputs = dict(zip(args_list, arg_narrays))
 np.random.seed(0)
 # set random weight
 for name, narray in inputs.items():
     if "weight" in name:
-        narray.numpy[:, :] = np.random.uniform(-0.07, 0.07, narray.numpy.shape)
+        narray[:] = np.random.uniform(-0.07, 0.07, narray.shape)
     if "bias" in name:
-        narray.numpy[:] = 0.0
+        narray[:] = 0.0
 
 # bind executor
 # TODO(bing): think of a better bind interface
 executor = softmax.bind(mx.Context('cpu'), arg_narrays, grad_narrays)
 # update
 
 out_narray = executor.heads()[0]
-grad_narray = mx.narray.create(out_narray.shape)
+grad_narray = mx.narray.empty(out_narray.shape)
 
 epoch = 9
 lr = 0.1
@@ -74,26 +74,24 @@ def test_mlp():
     train_nbatch = 0
     val_nbatch = 0
     for data, label in train_dataiter:
-        data = data.numpy
-        label = label.numpy.flatten()
-        inputs["data"].numpy[:] = data
-        inputs["sm_label"].numpy[:] = label
+        label = label.asnumpy().flatten()
+        inputs["data"][:] = data
+        inputs["sm_label"][:] = label
         executor.forward()
-        train_acc += CalAcc(out_narray.numpy, label)
+        train_acc += CalAcc(out_narray.asnumpy(), label)
         train_nbatch += 1
-        grad_narray.numpy[:] = out_narray.numpy
+        grad_narray[:] = out_narray
         executor.backward([grad_narray])
 
         for grad, weight in block:
             Update(grad, weight)
 
     # evaluate
     for data, label in val_dataiter:
-        data = data.numpy
-        label = label.numpy.flatten()
-        inputs["data"].numpy[:] = data
+        label = label.asnumpy().flatten()
+        inputs["data"][:] = data
         executor.forward()
-        val_acc += CalAcc(out_narray.numpy, label)
+        val_acc += CalAcc(out_narray.asnumpy(), label)
         val_nbatch += 1
     acc_train = train_acc / train_nbatch
     acc_val = val_acc / val_nbatch
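
This test and test_conv.py both score batches with CalAcc(out, label), whose body lies outside the hunks. A conventional implementation, assuming out holds per-class softmax scores of shape (batch_size, num_classes) and label holds integer class ids:

    import numpy as np

    def CalAcc(out, label):
        pred = np.argmax(out, axis=1)  # most probable class per row
        return np.sum(pred == label) * 1.0 / out.shape[0]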
32 changes: 16 additions & 16 deletions tests/python/test_operator.py
@@ -20,24 +20,24 @@ def check_elementwise_sum_with_shape(shape, n):
     # forward
     inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
     out = mx.symbol.ElementWiseSum(*inputs, name='esum')
-    arr = [mx.narray.create(shape) for i in range(n)]
-    arr_grad = [mx.narray.create(shape) for i in range(n)]
+    arr = [mx.narray.empty(shape) for i in range(n)]
+    arr_grad = [mx.narray.empty(shape) for i in range(n)]
     for i in range(n):
-        arr[i].numpy[:] = np.random.uniform(-10, 10, shape)
+        arr[i][:] = np.random.uniform(-10, 10, shape)
     exec1 = out.bind(mx.Context('cpu'),
                      args=arr,
                      args_grad=arr_grad)
-    out1 = exec1.heads()[0].numpy
+    out1 = exec1.heads()[0].asnumpy()
     exec1.forward()
-    out1 = exec1.heads()[0].numpy
-    out = sum(a.numpy for a in arr)
+    out1 = exec1.heads()[0].asnumpy()
+    out = sum(a.asnumpy() for a in arr)
     assert reldiff(out, out1) < 1e-6
-    out_grad = mx.narray.create(shape)
-    out_grad.numpy[:] = np.random.uniform(-10, 10, shape)
+    out_grad = mx.narray.empty(shape)
+    out_grad[:] = np.random.uniform(-10, 10, shape)
     # backward
     exec1.backward([out_grad])
     for a in arr_grad:
-        assert same(a.numpy, out_grad.numpy)
+        assert same(a.asnumpy(), out_grad.asnumpy())
 
 
 def test_elementwise_sum():
@@ -58,27 +58,27 @@ def check_concat_with_shape(shapes):
 
     inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
     out = mx.symbol.Concat(*inputs, name='conc')
-    arr = [mx.narray.create(shape) for shape in shapes]
+    arr = [mx.narray.empty(shape) for shape in shapes]
     for i in range(n):
         arr[i][:] = shapes[i][1]
-    arr_np = [np.copy(narray.numpy) for narray in arr]
-    arr_grad = [mx.narray.create(shape) for shape in shapes]
+    arr_np = [np.copy(narray.asnumpy()) for narray in arr]
+    arr_grad = [mx.narray.empty(shape) for shape in shapes]
     args = out.list_arguments()
     arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
-    out_grad = mx.narray.create(out_shapes[0])
+    out_grad = mx.narray.empty(out_shapes[0])
     exec1 = out.bind(mx.Context('cpu'),
                      args=arr,
                      args_grad=arr_grad)
     exec1.forward()
     out1 = exec1.heads()[0]
-    ret = np.concatenate([narray.numpy for narray in arr], axis=1)
-    assert same(out1.numpy, ret)
+    ret = np.concatenate([narray.asnumpy() for narray in arr], axis=1)
+    assert same(out1.asnumpy(), ret)
     # backward
     out1.copyto(out_grad)
     out_grad[:] += 1
     exec1.backward([out_grad])
     for grad, np_grad in zip(arr_grad, arr_np):
-        assert same(grad.numpy, np_grad + 1)
+        assert same(grad.asnumpy(), np_grad + 1)
 
 def test_concat():
     n = 2
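
The assertions in this file lean on two comparison helpers, reldiff and same, which live in a shared test utility the diff does not touch. Conventional definitions consistent with how they are used here, as a sketch rather than the repository's exact code:

    import numpy as np

    def same(a, b):
        # Exact elementwise equality, for operators expected to be bit-faithful.
        return np.array_equal(a, b)

    def reldiff(a, b):
        # Relative L1 difference, so the 1e-6 thresholds above do not
        # depend on the overall scale of the outputs.
        diff = np.sum(np.abs(a - b))
        norm = np.sum(np.abs(a))
        return diff / norm if norm != 0 else diff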
