Enable unit test for wasi-nn WinML backend.
This test was disabled because the GitHub Actions Windows Server image
doesn't include the desktop experience. However, a standalone WinML binary
can be downloaded from the ONNX Runtime project.

The wasi-nn WinML backend and the ONNX Runtime backend now share the same
test code, since they accept the same input and are expected to produce the
same result.

This change also makes the wasi-nn WinML backend a default feature.

prtest:full
jianjunz committed Apr 23, 2024
1 parent bfc9f31 commit cea8605
Showing 10 changed files with 174 additions and 97 deletions.
9 changes: 9 additions & 0 deletions .github/workflows/main.yml
@@ -480,6 +480,15 @@ jobs:
- uses: abrown/install-openvino-action@v8
if: runner.arch == 'X64'

# Install WinML for testing the wasi-nn WinML backend. WinML is only available
# on Windows clients and Windows Server with the desktop experience enabled.
# The GitHub Actions Windows Server image doesn't have the desktop experience
# enabled, so we download the standalone library from the ONNX Runtime project.
- uses: nuget/setup-nuget@v2
if: matrix.os == 'windows-latest'
- run: nuget install Microsoft.AI.MachineLearning
if: matrix.os == 'windows-latest'

# Fix an ICE for now in gcc when compiling zstd with debuginfo (??)
- run: echo CFLAGS=-g0 >> $GITHUB_ENV
if: matrix.target == 'x86_64-pc-windows-gnu'
80 changes: 80 additions & 0 deletions Cargo.lock

3 changes: 3 additions & 0 deletions crates/test-programs/Cargo.toml
@@ -20,3 +20,6 @@ futures = { workspace = true, default-features = false, features = ['alloc'] }
url = { workspace = true }
sha2 = "0.10.2"
base64 = "0.21.0"
# image and ndarray are used by nn_image_classification_onnx for image preprocessing.
image = { version = "0.24.6", default-features = false, features = ["jpeg"] }
ndarray = "0.15.3"
66 changes: 64 additions & 2 deletions crates/test-programs/src/bin/nn_image_classification_onnx.rs
@@ -1,6 +1,8 @@
use anyhow::Result;
use std::fs;
use wasi_nn::*;
use ndarray::Array;
use image::{DynamicImage, RgbImage};

pub fn main() -> Result<()> {
let model = fs::read("fixture/model.onnx").unwrap();
@@ -17,8 +19,16 @@ pub fn main() -> Result<()> {

// Prepare the WASI-NN tensor - tensor data is always a byte vector.
// Load an image and preprocess it into the tensor layout the graph expects.
let data = fs::read("fixture/tensor.bgr").unwrap();
let data = fs::read("fixture/dog.jpg").unwrap();
println!("[ONNX] Read input tensor, size in bytes: {}", data.len());
let data = preprocess(
data.as_slice(),
224,
224,
&[0.485, 0.456, 0.406],
&[0.229, 0.224, 0.225],
);
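// `data` now holds f32 bytes in planar CHW order, matching the [1, 3, 224, 224] shape passed to set_input below.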

context.set_input(0, wasi_nn::TensorType::F32, &[1, 3, 224, 224], &data)?;

// Execute the inferencing
@@ -28,11 +38,26 @@ pub fn main() -> Result<()> {
// Retrieve the output.
let mut output_buffer = vec![0f32; 1000];
context.get_output(0, &mut output_buffer[..])?;

// Post-processing: convert the raw scores into softmax probabilities.
let output_shape = [1, 1000, 1, 1];
let output_tensor = Array::from_shape_vec(output_shape, output_buffer).unwrap();

let exp_output = output_tensor.mapv(|x| x.exp());
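// Summing over the class axis gives shape [1, 1, 1]; dividing broadcasts it back across the
// 1000 classes, yielding the softmax probabilities.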
let sum_exp_output = exp_output.sum_axis(ndarray::Axis(1));
let softmax_output = exp_output / &sum_exp_output;

let sorted = sort_results(&softmax_output.into_raw_vec());

println!(
"[ONNX] Found results, sorted top 5: {:?}",
&sort_results(&output_buffer)[..5]
&sorted[..5]
);

// Index 207 is curly-coated retriever.
// https://github.com/onnx/models/blob/bec48b6a70e5e9042c0badbaafefe4454e072d08/validated/vision/classification/synset.txt#L207
assert_eq!(sorted[0].0, 207);

Ok(())
}

@@ -50,6 +75,43 @@ fn sort_results(buffer: &[f32]) -> Vec<InferenceResult> {
results
}
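// The body of `sort_results` is collapsed in this diff. As a sketch (an assumption about the
// collapsed code, not necessarily the exact implementation), it pairs each score with its
// class index and sorts in descending order of probability:
//
// fn sort_results(buffer: &[f32]) -> Vec<InferenceResult> {
//     let mut results: Vec<InferenceResult> = buffer
//         .iter()
//         .enumerate()
//         .map(|(class, prob)| InferenceResult(class, *prob))
//         .collect();
//     results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
//     results
// }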

// Decode the JPEG bytes in `image`, resize to `height` x `width`, convert the pixel
// precision to FP32, and normalize each channel with `mean` and `std`. The resulting
// pixel data is returned as a planar (CHW) byte vector.
fn preprocess(
image: &[u8],
height: u32,
width: u32,
mean: &[f32],
std: &[f32],
) -> Vec<u8> {
let dyn_img: DynamicImage = image::load_from_memory(image)
    .unwrap()
    .resize_exact(width, height, image::imageops::Triangle);
let rgb_img: RgbImage = dyn_img.to_rgb8();

// Get an array of the pixel values
let raw_u8_arr: &[u8] = &rgb_img.as_raw()[..];

// Create an array to hold the f32 value of those pixels
let bytes_required = raw_u8_arr.len() * 4;
let mut u8_f32_arr: Vec<u8> = vec![0; bytes_required];

// Read the number as a f32 and break it into u8 bytes
for i in 0..raw_u8_arr.len() {
let u8_f32: f32 = raw_u8_arr[i] as f32;
let rgb_iter = i % 3;

// Normalize the pixel
let norm_u8_f32: f32 = (u8_f32 / 255.0 - mean[rgb_iter]) / std[rgb_iter];

// Convert it to u8 bytes and write it with new shape
let u8_bytes = norm_u8_f32.to_ne_bytes();
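// Destination index: channel plane first (each plane is raw_u8_arr.len() * 4 / 3 bytes),
// then pixel position, then byte within the f32 - i.e. the interleaved HWC input is
// rewritten as planar CHW output.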
for j in 0..4 {
u8_f32_arr[(raw_u8_arr.len() * 4 * rgb_iter / 3) + (i / 3) * 4 + j] = u8_bytes[j];
}
}

u8_f32_arr
}

// A wrapper for class ID and match probabilities.
#[derive(Debug, PartialEq)]
struct InferenceResult(usize, f32);
58 changes: 0 additions & 58 deletions crates/test-programs/src/bin/nn_image_classification_winml.rs

This file was deleted.

2 changes: 1 addition & 1 deletion crates/wasi-nn/Cargo.toml
Expand Up @@ -50,7 +50,7 @@ wasi-common = { workspace = true, features = ["sync"] }
wasmtime = { workspace = true, features = ["cranelift"] }

[features]
default = ["openvino"]
default = ["openvino", "winml"]
# openvino is available on all platforms, it requires openvino installed.
openvino = ["dep:openvino"]
# onnx is available on all platforms.
