Serverless

Dataset

This work is based on the previous one. The data needs to be downloaded from here:

Library & function

import numpy as np
import onnxruntime as ort
from io import BytesIO
from urllib import request
from PIL import Image


def download_image(url):
    # Fetch the image bytes over HTTP and open them with PIL
    with request.urlopen(url) as resp:
        buffer = resp.read()
    stream = BytesIO(buffer)
    img = Image.open(stream)
    return img


def prepare_image(img, target_size):
    # Ensure a 3-channel RGB image and resize it to the model's input size
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = img.resize(target_size, Image.NEAREST)
    return img
Load model
onnx_model_path = "../data/hair_classifier_v1.onnx"
session = ort.InferenceSession(onnx_model_path, providers=["CPUExecutionProvider"])

inputs = session.get_inputs()
input_name = inputs[0].name
input_name
'input'

outputs = session.get_outputs()
output_name = outputs[0].name
outputs[0]
<onnxruntime.capi.onnxruntime_pybind11_state.NodeArg at 0x2c680022270>
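The NodeArg repr above is not very informative on its own; its name, shape, and type attributes can be printed instead. A minimal sketch (the shapes it reports depend on how hair_classifier_v1.onnx was exported, e.g. the batch axis may be symbolic):

# Inspect the declared signature of the loaded model.
# Shapes depend on how the ONNX file was exported; the batch axis may be symbolic.
for node in session.get_inputs():
    print("input :", node.name, node.shape, node.type)
for node in session.get_outputs():
    print("output:", node.name, node.shape, node.type)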
Image size
img_url = "https://habrastorage.org/webt/yf/_d/ok/yf_dokzqy3vcritme8ggnzqlvwa.jpeg"
img = download_image(img_url)
x = np.array(img)
x.shape
(1024, 1024, 3)
Resize
img_rz = prepare_image(img, (200, 200))
img_rz

Preprocessing
x = np.array(img_rz)
x = x / 255.0 # scale to 0-1 (like ToTensor does)
# normalize with ImageNet mean and std
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
x = (x - mean) / std
x[0, 0, 0]
-1.0732939463995204
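As a quick sanity check, this number can be reproduced by hand: inverting the formula above, it corresponds to a raw red value of 61 for the top-left pixel of the resized image (61 is recovered from the output, not read from the image directly):

# Reproduce the normalized value for the top-left red channel.
# raw = 61 is an assumption obtained by inverting (raw/255 - mean) / std.
raw = 61
print((raw / 255.0 - 0.485) / 0.229)  # -1.0732939463995204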
Model output
# from (Height, Width, Channel) to (C, H, W) for PyTorch format
# (200, 200, 3) -> (3, 200, 200)
x = np.transpose(x, (2, 0, 1))
# For batch dimension: (C, H, W) -> (1, C, H, W)
# (3, 200, 200) -> (1, 3, 200, 200)
x = np.expand_dims(x, axis=0)
# Convert to float32
x = x.astype(np.float32)
result = session.run([output_name], {input_name: x})
result[0]
array([[0.08927415]], dtype=float32)
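For convenience, the whole flow can be collected into one helper. A minimal sketch reusing the objects defined above; the predict name and the default 200x200 size are choices made here, not part of the original code:

def predict(url, target_size=(200, 200)):
    # download -> RGB resize -> scale to [0, 1] -> ImageNet normalize
    # -> HWC to CHW -> add batch axis -> float32 -> run the ONNX session
    img = prepare_image(download_image(url), target_size)
    x = np.array(img) / 255.0
    x = (x - mean) / std
    x = np.transpose(x, (2, 0, 1))
    x = np.expand_dims(x, axis=0).astype(np.float32)
    return float(session.run([output_name], {input_name: x})[0][0, 0])


predict(img_url)  # ~0.0893 for the image above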
Work with Docker
docker images
Model output in Docker
docker run -it --rm --entrypoint=bash hw09 -c "python q6.py"
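The contents of q6.py are not shown here; a plausible minimal sketch, assuming it simply scores one test image inside the container with the same steps as above (the file paths, the URL, and the script body are assumptions, not the original file):

# q6.py -- hypothetical sketch; the real script is not part of this post.
import numpy as np
import onnxruntime as ort
from io import BytesIO
from urllib import request
from PIL import Image

MODEL_PATH = "hair_classifier_v1.onnx"  # path inside the image (assumed)
URL = "https://habrastorage.org/webt/yf/_d/ok/yf_dokzqy3vcritme8ggnzqlvwa.jpeg"

session = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

with request.urlopen(URL) as resp:
    img = Image.open(BytesIO(resp.read())).convert("RGB").resize((200, 200), Image.NEAREST)

x = np.array(img) / 255.0
x = (x - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
x = np.expand_dims(np.transpose(x, (2, 0, 1)), axis=0).astype(np.float32)

print(session.run([output_name], {input_name: x})[0])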