Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added unit tests for diff input model types #82

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
208 changes: 201 additions & 7 deletions csharp/test/Microsoft.ML.OnnxRuntime.Tests/InferenceTest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,9 @@ private void CanRunInferenceOnAModel()
var results = session.Run(container); // results is an IReadOnlyList<NamedOnnxValue> container

Assert.Equal(1, results.Count);

float[] expectedOutput = LoadTensorFromFile(@"bench.expected_out");
float errorMargin = 1e-6F;
float errorMargin = 1e-6F;
// validate the results
foreach (var r in results)
{
Expand All @@ -82,7 +82,7 @@ private void CanRunInferenceOnAModel()
var resultTensor = r.AsTensor<float>();
int[] expectedDimensions = { 1, 1000, 1, 1 }; // hardcoded for now for the test data
Assert.Equal(expectedDimensions.Length, resultTensor.Rank);

var resultDimensions = resultTensor.Dimensions;
for (int i = 0; i < expectedDimensions.Length; i++)
{
Expand Down Expand Up @@ -113,8 +113,8 @@ private void ThrowWrongInputName()
var container = new List<NamedOnnxValue>();
container.Add(NamedOnnxValue.CreateFromTensor<float>("wrong_name", tensor));
var ex = Assert.Throws<OnnxRuntimeException>(() => session.Run(container));
Assert.Equal("[ErrorCode:InvalidArgument] Missing required inputs: data_0", ex.Message);
session.Dispose();
Assert.Equal("[ErrorCode:InvalidArgument] Invalid Feed Input Names: wrong_name Valid input names are: data_0 ", ex.Message);
session.Dispose();
}

[Fact]
Expand Down Expand Up @@ -179,10 +179,205 @@ private void ThrowExtraInputs()
container.Add(nov1);
container.Add(nov2);
var ex = Assert.Throws<OnnxRuntimeException>(() => session.Run(container));
Assert.StartsWith("[ErrorCode:InvalidArgument] Invalid Feed Input Names: extra. Valid input names are: ", ex.Message);
Assert.Equal("[ErrorCode:InvalidArgument] The number of feeds is not same as the number of the model input, expect 1 got 2", ex.Message);
session.Dispose();
}

[Fact]
private void TestModelInputFloat()
{
    // The test model takes a 1x5 FLOAT tensor and echoes it back unchanged.
    // Path.Combine avoids the hard-coded '\' separator so the test is cross-platform.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_FLOAT.onnx");
    // Dispose the session deterministically; sibling tests call session.Dispose(),
    // so InferenceSession owns resources that must be released.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<float>(
            new float[] { 1.0f, 2.0f, -3.0f, float.MinValue, float.MaxValue },
            new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        // Validate the echoed output — the original test ran the model but never
        // checked the result, unlike every other TestModelInput* test.
        var tensorOut = res.First().AsTensor<float>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact(Skip = "Boolean tensor not supported yet")]
private void TestModelInputBOOL()
{
    // The test model takes a 1x5 BOOL tensor and echoes it back unchanged.
    // Path.Combine keeps the model path cross-platform.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_BOOL.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<bool>(new bool[] { true, false, true, false, true }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<bool>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}
[Fact]
private void TestModelInputINT32()
{
    // The test model takes a 1x5 INT32 tensor and echoes it back unchanged.
    // (The [Fact] attribute was previously separated from the method by a blank
    // line; it is now attached directly, matching the sibling tests.)
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_INT32.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<int>(new int[] { 1, -2, -3, int.MinValue, int.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<int>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputDOUBLE()
{
    // The test model takes a 1x5 DOUBLE tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_DOUBLE.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        // NOTE(review): double.MinValue/MaxValue were commented out in the original
        // in favor of plain values — presumably they broke the round trip; confirm
        // and restore extreme-value coverage if possible.
        var tensorIn = new DenseTensor<double>(new double[] { 1.0, 2.0, -3.0, 5, 5 }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<double>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact(Skip = "String tensor not supported yet")]
private void TestModelInputSTRING()
{
    // The test model takes a 1x5 STRING tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_STRING.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<string>(new string[] { "a", "c", "d", "z", "f" }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<string>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact(Skip = "Int8 not supported yet")]
private void TestModelInputINT8()
{
    // The test model takes a 1x5 INT8 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_INT8.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<sbyte>(new sbyte[] { 1, 2, -3, sbyte.MinValue, sbyte.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<sbyte>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputUINT8()
{
    // The test model takes a 1x5 UINT8 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_UINT8.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<byte>(new byte[] { 1, 2, 3, byte.MinValue, byte.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<byte>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputUINT16()
{
    // The test model takes a 1x5 UINT16 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_UINT16.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<ushort>(new ushort[] { 1, 2, 3, ushort.MinValue, ushort.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<ushort>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputINT16()
{
    // The test model takes a 1x5 INT16 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_INT16.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<short>(new short[] { 1, 2, 3, short.MinValue, short.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<short>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputINT64()
{
    // The test model takes a 1x5 INT64 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_INT64.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<long>(new long[] { 1, 2, -3, long.MinValue, long.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<long>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void TestModelInputUINT32()
{
    // The test model takes a 1x5 UINT32 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_UINT32.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<uint>(new uint[] { 1, 2, 3, uint.MinValue, uint.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<uint>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}
[Fact]
private void TestModelInputUINT64()
{
    // The test model takes a 1x5 UINT64 tensor and echoes it back unchanged.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_UINT64.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        var tensorIn = new DenseTensor<ulong>(new ulong[] { 1, 2, 3, ulong.MinValue, ulong.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<ulong>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact(Skip = "FLOAT16 not available in C#")]
private void TestModelInputFLOAT16()
{
    // The test model takes a 1x5 FLOAT16 tensor and echoes it back unchanged.
    // Skip reason fixed: the original said "Boolean FLOAT16", a copy-paste
    // artifact from the BOOL test.
    string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "test_types_FLOAT16.onnx");
    // using ensures the session's resources are released even if an assert throws.
    using (var session = new InferenceSession(modelPath))
    {
        // C# has no half-precision primitive here, so float stands in for FLOAT16.
        var tensorIn = new DenseTensor<float>(new float[] { 1.0f, 2.0f, -3.0f, float.MinValue, float.MaxValue }, new int[] { 1, 5 });
        var container = new List<NamedOnnxValue>();
        container.Add(NamedOnnxValue.CreateFromTensor("input", tensorIn));
        var res = session.Run(container);
        var tensorOut = res.First().AsTensor<float>();
        Assert.True(tensorOut.SequenceEqual(tensorIn));
    }
}

[Fact]
private void Yunsong()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we remove this now?

{
Expand Down Expand Up @@ -404,7 +599,6 @@ private void Yunsong()
}



static float[] LoadTensorFromFile(string filename)
{
var tensorData = new List<float>();
Expand Down
Binary file added csharp/testdata/test_types_BOOL.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_DOUBLE.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_FLOAT.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_FLOAT16.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_INT16.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_INT32.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_INT64.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_INT8.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_STRING.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_UINT16.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_UINT32.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_UINT64.onnx
Binary file not shown.
Binary file added csharp/testdata/test_types_UINT8.onnx
Binary file not shown.