Metadata

The ONNX format stores metadata about how a model was produced. This is useful in production to keep track of which model instance was deployed at a specific time. Let's see how to read that metadata for a simple logistic regression model trained with scikit-learn.
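The walkthrough below loads a pre-converted model shipped with onnxruntime. A model like it can be produced with skl2onnx; the following is a minimal sketch of one way to do so (the output file name is illustrative):

import numpy
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import skl2onnx

# Train a small logistic regression on the iris dataset.
X, y = load_iris(return_X_y=True)
clr = LogisticRegression(max_iter=500).fit(X, y)

# Convert it to ONNX; skl2onnx fills in producer_name,
# producer_version and other metadata automatically.
onx = skl2onnx.to_onnx(clr, X[:1].astype(numpy.float32))
with open("logreg_iris_local.onnx", "wb") as f:
    f.write(onx.SerializeToString())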

import numpy
import sklearn
import onnx
import onnxruntime
import skl2onnx
from onnxruntime import InferenceSession
from onnxruntime.datasets import get_example

# Load the example model shipped with onnxruntime.
example = get_example("logreg_iris.onnx")
model = onnx.load(example)

print("doc_string={}".format(model.doc_string))
print("domain={}".format(model.domain))
print("ir_version={}".format(model.ir_version))
print("metadata_props={}".format(model.metadata_props))
print("model_version={}".format(model.model_version))
print("producer_name={}".format(model.producer_name))
print("producer_version={}".format(model.producer_version))

Out:

doc_string=
domain=onnxml
ir_version=3
metadata_props=[]
model_version=0
producer_name=OnnxMLTools
producer_version=1.2.0.0116
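metadata_props is empty for this model. A producer can attach custom key/value pairs before saving; a minimal sketch using onnx.helper.set_model_props (the keys and file name below are illustrative):

from onnx.helper import set_model_props

# Attach custom key/value pairs to the model.
set_model_props(model, {"trained_on": "iris", "trained_by": "example"})
onnx.save(model, "logreg_iris_props.onnx")

# The pairs now appear in metadata_props and, at runtime,
# in the session's custom_metadata_map.
print(onnx.load("logreg_iris_props.onnx").metadata_props)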

ONNX Runtime exposes the same metadata through the inference session:

# Since onnxruntime 1.9, the providers argument must be set explicitly.
sess = InferenceSession(example, providers=["CPUExecutionProvider"])
meta = sess.get_modelmeta()

print("custom_metadata_map={}".format(meta.custom_metadata_map))
print("description={}".format(meta.description))
print("domain={}".format(meta.domain))
print("graph_name={}".format(meta.graph_name))
print("producer_name={}".format(meta.producer_name))
print("version={}".format(meta.version))

Since onnxruntime 1.9, omitting the providers argument raises a ValueError listing the providers enabled in the build, for example ['CUDAExecutionProvider', 'CPUExecutionProvider']; passing providers=["CPUExecutionProvider"] as above avoids the error.
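Because the set of enabled providers depends on how onnxruntime was built, a cautious pattern is to query them first. A minimal sketch, reusing the example path and imports from above:

# Providers compiled into this build, in default priority order.
print(onnxruntime.get_available_providers())

# CPUExecutionProvider is part of every standard build,
# so it is a safe fallback.
sess = InferenceSession(example, providers=["CPUExecutionProvider"])
print(sess.get_modelmeta().custom_metadata_map)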

Versions used for this example:

print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", onnxruntime.__version__)
print("skl2onnx: ", skl2onnx.__version__)

Total running time of the script: (0 minutes 0.008 seconds)
