When a custom model is neither a classifier nor a regressor (alternative)#


This example rewrites When a custom model is neither a classifier nor a regressor by using the syntax proposed in example Play with ONNX operators to write the custom converter, shape calculator and parser.

scikit-learn’s API specifies that a regressor produces one output and a classifier produces two outputs, predicted labels and probabilities. The goal here is to add a third result which tells if the probability is above a given threshold. That’s implemented in method validate.

Iris and scoring#

A new class is created, it trains any classifier and implements the method validate mentioned above.

import inspect
import os

import matplotlib.pyplot as plt
import numpy as np
import onnx
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
import onnxruntime as rt
import sklearn
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import skl2onnx
from skl2onnx import update_registered_converter
from skl2onnx import to_onnx, get_model_alias
from skl2onnx.proto import onnx_proto
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from skl2onnx.algebra.onnx_ops import (
    OnnxGreater, OnnxCast, OnnxReduceMaxApi18, OnnxIdentity)
from skl2onnx.algebra.onnx_operator import OnnxSubEstimator

class ValidatorClassifier(BaseEstimator, ClassifierMixin):
    """Wraps any classifier and adds a third result, :meth:`validate`,
    which tells whether the highest predicted probability is above
    a given threshold.

    :param estimator: underlying classifier, defaults to
        ``LogisticRegression(solver='liblinear')``
    :param threshold: probability threshold used by :meth:`validate`
    """

    def __init__(self, estimator=None, threshold=0.75):
        if estimator is None:
            estimator = LogisticRegression(solver='liblinear')
        self.estimator = estimator
        self.threshold = threshold

    def fit(self, X, y, sample_weight=None):
        """Fits a clone of the wrapped estimator, forwarding
        *sample_weight* only when its ``fit`` accepts it."""
        sig = inspect.signature(self.estimator.fit)
        if 'sample_weight' in sig.parameters:
            self.estimator_ = clone(self.estimator).fit(
                X, y, sample_weight=sample_weight)
        else:
            # BUG FIX: this branch was unconditional in the original,
            # so the weighted fit above was always overwritten by an
            # unweighted one.
            self.estimator_ = clone(self.estimator).fit(X, y)
        return self

    def predict(self, X):
        """Delegates to the fitted estimator."""
        return self.estimator_.predict(X)

    def predict_proba(self, X):
        """Delegates to the fitted estimator."""
        return self.estimator_.predict_proba(X)

    def validate(self, X):
        """Returns 1 for every row whose maximum predicted
        probability is at least ``self.threshold``, else 0."""
        pred = self.predict_proba(X)
        mx = pred.max(axis=1)
        return (mx >= self.threshold) * 1

# Load the Iris dataset and split it into train and test parts.
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Train the wrapper on the training part.
model = ValidatorClassifier()
model.fit(X_train, y_train)
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.

Let’s now measure the indicator which tells if the probability of a prediction is above a threshold.

[1 1 1 0 1 0 0 0 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 0 0 0 0 0 1 0 1 0 1 0

Conversion to ONNX#

The conversion fails for a new model because the library does not know any converter associated to this new model.

    to_onnx(model, X_train[:1].astype(np.float32),
except RuntimeError as e:
Unable to find a shape calculator for type '<class '__main__.ValidatorClassifier'>'.
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter
implemented in sklearn-onnx. If the converted is implemented
in another library, you need to register
the converted so that it can be used by sklearn-onnx (function
update_registered_converter). If the model is not yet covered
by sklearn-onnx, you may raise an issue to https://github.com/onnx/sklearn-onnx/issues
to get the converter implemented or even contribute to the
project. If the model is a custom model, a new converter must
be implemented. Examples can be found in the gallery.

Custom converter#

We reuse some pieces of code from Write your own converter for your own model. The shape calculator defines the shape of every output of the converted model.

def validator_classifier_shape_calculator(operator):
    """Declares the type and shape of the three outputs produced by
    the converted ValidatorClassifier: labels, probabilities and the
    per-observation validation flag."""
    input0 = operator.inputs[0]     # first input in ONNX graph
    outputs = operator.outputs      # outputs in ONNX graph
    op = operator.raw_operator      # scikit-learn model (must be fitted)
    if len(outputs) != 3:
        raise RuntimeError("3 outputs expected not {}.".format(len(outputs)))

    N = input0.type.shape[0]                    # number of observations
    C = op.estimator_.classes_.shape[0]         # dimension of outputs

    outputs[0].type = Int64TensorType([N])      # label
    outputs[1].type = FloatTensorType([N, C])   # probabilities
    # BUG FIX: validate() returns one flag per observation, so this
    # output has shape [N]; the original declared [C] (class count).
    outputs[2].type = Int64TensorType([N])      # validation

Then the converter.

def validator_classifier_converter(scope, operator, container):
    """Builds the ONNX subgraph for ValidatorClassifier: converts the
    wrapped estimator with OnnxSubEstimator, then appends
    ReduceMax -> Greater -> Cast to compute the validation flag.

    NOTE(review): the ``op_version=opv`` continuation lines were lost
    in the scraped page and have been restored following the pattern
    of the surviving calls — confirm against the original example.
    """
    input0 = operator.inputs[0]         # first input in ONNX graph
    outputs = operator.outputs          # outputs in ONNX graph
    op = operator.raw_operator          # scikit-learn model (must be fitted)
    opv = container.target_opset

    # The model calls another one. The class `OnnxSubEstimator`
    # calls the converter for this operator.
    model = op.estimator_
    onnx_op = OnnxSubEstimator(model, input0, op_version=opv,
                               options={'zipmap': False})

    # max probability per row, compared to the threshold, cast to int64
    rmax = OnnxReduceMaxApi18(onnx_op[1], axes=[1], keepdims=0, op_version=opv)
    great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32),
                        op_version=opv)
    valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64,
                     op_version=opv)

    # bind the three results to the declared output names
    r1 = OnnxIdentity(onnx_op[0], output_names=[outputs[0].full_name],
                      op_version=opv)
    r2 = OnnxIdentity(onnx_op[1], output_names=[outputs[1].full_name],
                      op_version=opv)
    r3 = OnnxIdentity(valid, output_names=[outputs[2].full_name],
                      op_version=opv)

    r1.add_to(scope, container)
    r2.add_to(scope, container)
    r3.add_to(scope, container)

Then the registration.

# Register shape calculator and converter for the new model.
# NOTE(review): the continuation lines were lost in the scrape;
# reconstructed with the two helpers defined above.
update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',
                            validator_classifier_shape_calculator,
                            validator_classifier_converter)

And conversion…

    to_onnx(model, X_test[:1].astype(np.float32),
except RuntimeError as e:
3 outputs expected not 2.

It fails because the library expected the model to behave like a classifier which produces two outputs. We need to add a custom parser to tell the library this model produces three outputs.

Custom parser#

def validator_classifier_parser(scope, model, inputs, custom_parsers=None):
    """Custom parser declaring that ValidatorClassifier produces three
    outputs (label, probabilities, validation flag) instead of the two
    the library assumes for a classifier."""
    alias = get_model_alias(type(model))
    this_operator = scope.declare_local_operator(alias, model)

    # inputs
    # BUG FIX: the scraped page lost these attachment lines; without
    # them the operator has no inputs/outputs and the returned list
    # is empty.
    this_operator.inputs.append(inputs[0])

    # outputs
    val_label = scope.declare_local_variable('val_label', Int64TensorType())
    val_prob = scope.declare_local_variable('val_prob', FloatTensorType())
    val_val = scope.declare_local_variable('val_val', Int64TensorType())
    this_operator.outputs.append(val_label)
    this_operator.outputs.append(val_prob)
    this_operator.outputs.append(val_val)

    # ends
    return this_operator.outputs


# Re-register, this time also providing the custom parser.
# NOTE(review): continuation lines lost in the scrape; reconstructed.
update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',
                            validator_classifier_shape_calculator,
                            validator_classifier_converter,
                            parser=validator_classifier_parser)

And conversion again.

# Conversion now succeeds with the parser in place.
# NOTE(review): trailing arguments lost in the scrape; reconstructed
# with a default target_opset — confirm against the original example.
model_onnx = to_onnx(model, X_test[:1].astype(np.float32),
                     target_opset=12)

Final test#

We need now to check the results are the same with ONNX.

# Compare scikit-learn predictions against the ONNX model on five rows.
X32 = X_test[:5].astype(np.float32)

sess = rt.InferenceSession(model_onnx.SerializeToString())
label, proba, valid = sess.run(None, {'X': X32})

# Print each sklearn result followed by its ONNX counterpart.
for skl_value, onnx_value in ((model.predict(X32), label),
                              (model.predict_proba(X32), proba),
                              (model.validate(X32), valid)):
    print("sklearn", skl_value)
    print("onnx", onnx_value)
sklearn [2 0 0 2 0]
onnx [2 0 0 2 0]
sklearn [[6.57906777e-04 1.91959240e-01 8.07382854e-01]
 [8.72054450e-01 1.27893759e-01 5.17916588e-05]
 [8.53658204e-01 1.46046803e-01 2.94993135e-04]
 [2.73275979e-04 3.98725606e-01 6.01001118e-01]
 [9.70430915e-01 2.95535348e-02 1.55504186e-05]]
onnx [[6.5787166e-04 1.9195931e-01 8.0738282e-01]
 [8.7205452e-01 1.2789373e-01 5.1797400e-05]
 [8.5365826e-01 1.4604677e-01 2.9501395e-04]
 [2.7327205e-04 3.9872569e-01 6.0100102e-01]
 [9.7043103e-01 2.9553531e-02 1.5552911e-05]]
sklearn [1 1 1 0 1]
onnx [1 1 1 0 1]

It looks good.

Display the ONNX graph#

# Render the ONNX graph with pydot + graphviz and display it.
# NOTE(review): several lines of this snippet were lost in the scrape
# (node_producer argument, write_dot, imshow); reconstructed following
# the standard sphinx-gallery pattern — confirm against the original.
pydot_graph = GetPydotGraph(
    model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
    node_producer=GetOpNodeProducer(
        "docstring", color="yellow", fillcolor="yellow", style="filled"))
pydot_graph.write_dot("validator_classifier.dot")

os.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot')

image = plt.imread("validator_classifier.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')
plot custom parser alternative
(-0.5, 3414.5, 4934.5, -0.5)

Versions used for this example

# Report the versions of every library used by this example.
for label, module in (("numpy:", np),
                      ("scikit-learn:", sklearn),
                      ("onnx: ", onnx),
                      ("onnxruntime: ", rt),
                      ("skl2onnx: ", skl2onnx)):
    print(label, module.__version__)
numpy: 1.23.5
scikit-learn: 1.3.dev0
onnx:  1.14.0
onnxruntime:  1.15.0+cpu
skl2onnx:  1.14.0

Total running time of the script: ( 0 minutes 2.812 seconds)

Gallery generated by Sphinx-Gallery